author    Alice Ferrazzi <alicef@gentoo.org>  2016-10-28 19:26:24 +0100
committer Alice Ferrazzi <alicef@gentoo.org>  2016-10-28 19:26:24 +0100
commit    ae1e8223edd642dd90a57647ff6ad861df23e5fc (patch)
tree      70f35cb615aa44bba1f0f25aa86aa2f54a14a2bb
parent    Linux patch 4.4.27 (diff)
download  linux-patches-ae1e8223.tar.gz
          linux-patches-ae1e8223.tar.bz2
          linux-patches-ae1e8223.zip

Linux patch 4.4.28 (tag: 4.4-30)

-rw-r--r--  0000_README             |    4
-rw-r--r--  1027_linux-4.4.28.patch | 4828
2 files changed, 4832 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 148210ee..356c33ea 100644
--- a/0000_README
+++ b/0000_README
@@ -151,6 +151,10 @@ Patch: 1026_linux-4.4.27.patch
From: http://www.kernel.org
Desc: Linux 4.4.27
+Patch: 1027_linux-4.4.28.patch
+From: http://www.kernel.org
+Desc: Linux 4.4.28
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1027_linux-4.4.28.patch b/1027_linux-4.4.28.patch
new file mode 100644
index 00000000..a653d5d9
--- /dev/null
+++ b/1027_linux-4.4.28.patch
@@ -0,0 +1,4828 @@
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 0e4102ae1a61..c360f80c3473 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -1371,7 +1371,14 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ i8042.nopnp [HW] Don't use ACPIPnP / PnPBIOS to discover KBD/AUX
+ controllers
+ i8042.notimeout [HW] Ignore timeout condition signalled by controller
+- i8042.reset [HW] Reset the controller during init and cleanup
++ i8042.reset [HW] Reset the controller during init, cleanup and
++ suspend-to-ram transitions, only during s2r
++ transitions, or never reset
++ Format: { 1 | Y | y | 0 | N | n }
++ 1, Y, y: always reset controller
++ 0, N, n: don't ever reset controller
++ Default: only on s2r transitions on x86; most other
++ architectures force reset to be always executed
+ i8042.unlock [HW] Unlock (ignore) the keylock
+ i8042.kbdreset [HW] Reset device connected to KBD port
+
+diff --git a/Makefile b/Makefile
+index b6ee4ce561f8..391294301aaf 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 27
++SUBLEVEL = 28
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+
+diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
+index 004b7f0bc76c..257b8699efde 100644
+--- a/arch/arc/kernel/signal.c
++++ b/arch/arc/kernel/signal.c
+@@ -107,13 +107,13 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
+ struct user_regs_struct uregs;
+
+ err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
+- if (!err)
+- set_current_blocked(&set);
+-
+ err |= __copy_from_user(&uregs.scratch,
+ &(sf->uc.uc_mcontext.regs.scratch),
+ sizeof(sf->uc.uc_mcontext.regs.scratch));
++ if (err)
++ return err;
+
++ set_current_blocked(&set);
+ regs->bta = uregs.scratch.bta;
+ regs->lp_start = uregs.scratch.lp_start;
+ regs->lp_end = uregs.scratch.lp_end;
+@@ -138,7 +138,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
+ regs->r0 = uregs.scratch.r0;
+ regs->sp = uregs.scratch.sp;
+
+- return err;
++ return 0;
+ }
+
+ static inline int is_do_ss_needed(unsigned int magic)
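The ARC hunk above reorders restore_usr_regs() so that both __copy_from_user() calls are checked before any state is committed: previously the restored signal mask was applied even when the later copy of the scratch registers failed. The same check-then-commit pattern, reduced to a runnable userspace sketch (copy_field() and the struct fields are illustrative stand-ins, not the kernel API):

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct state { int mask; int regs; };

/* stand-in for __copy_from_user(); returns 0 on success */
static int copy_field(void *dst, const void *src, size_t n)
{
	return src ? (memcpy(dst, src, n), 0) : -EFAULT;
}

static int restore_state(struct state *live, const struct state *user)
{
	struct state tmp;
	int err;

	/* gather everything first ... */
	err  = copy_field(&tmp.mask, user ? &user->mask : NULL, sizeof(tmp.mask));
	err |= copy_field(&tmp.regs, user ? &user->regs : NULL, sizeof(tmp.regs));
	if (err)
		return err;	/* ... and commit nothing on failure */

	live->mask = tmp.mask;
	live->regs = tmp.regs;
	return 0;
}

int main(void)
{
	struct state live = { 0, 0 }, user = { 7, 42 };

	printf("ok=%d mask=%d regs=%d\n",
	       restore_state(&live, &user), live.mask, live.regs);
	printf("fault=%d (live state untouched: mask=%d)\n",
	       restore_state(&live, NULL), live.mask);
	return 0;
}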
+diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
+index 0a456bef8c79..8a336852eeba 100644
+--- a/arch/arm64/include/asm/percpu.h
++++ b/arch/arm64/include/asm/percpu.h
+@@ -44,48 +44,44 @@ static inline unsigned long __percpu_##op(void *ptr, \
+ \
+ switch (size) { \
+ case 1: \
+- do { \
+- asm ("//__per_cpu_" #op "_1\n" \
+- "ldxrb %w[ret], %[ptr]\n" \
++ asm ("//__per_cpu_" #op "_1\n" \
++ "1: ldxrb %w[ret], %[ptr]\n" \
+ #asm_op " %w[ret], %w[ret], %w[val]\n" \
+- "stxrb %w[loop], %w[ret], %[ptr]\n" \
+- : [loop] "=&r" (loop), [ret] "=&r" (ret), \
+- [ptr] "+Q"(*(u8 *)ptr) \
+- : [val] "Ir" (val)); \
+- } while (loop); \
++ " stxrb %w[loop], %w[ret], %[ptr]\n" \
++ " cbnz %w[loop], 1b" \
++ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
++ [ptr] "+Q"(*(u8 *)ptr) \
++ : [val] "Ir" (val)); \
+ break; \
+ case 2: \
+- do { \
+- asm ("//__per_cpu_" #op "_2\n" \
+- "ldxrh %w[ret], %[ptr]\n" \
++ asm ("//__per_cpu_" #op "_2\n" \
++ "1: ldxrh %w[ret], %[ptr]\n" \
+ #asm_op " %w[ret], %w[ret], %w[val]\n" \
+- "stxrh %w[loop], %w[ret], %[ptr]\n" \
+- : [loop] "=&r" (loop), [ret] "=&r" (ret), \
+- [ptr] "+Q"(*(u16 *)ptr) \
+- : [val] "Ir" (val)); \
+- } while (loop); \
++ " stxrh %w[loop], %w[ret], %[ptr]\n" \
++ " cbnz %w[loop], 1b" \
++ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
++ [ptr] "+Q"(*(u16 *)ptr) \
++ : [val] "Ir" (val)); \
+ break; \
+ case 4: \
+- do { \
+- asm ("//__per_cpu_" #op "_4\n" \
+- "ldxr %w[ret], %[ptr]\n" \
++ asm ("//__per_cpu_" #op "_4\n" \
++ "1: ldxr %w[ret], %[ptr]\n" \
+ #asm_op " %w[ret], %w[ret], %w[val]\n" \
+- "stxr %w[loop], %w[ret], %[ptr]\n" \
+- : [loop] "=&r" (loop), [ret] "=&r" (ret), \
+- [ptr] "+Q"(*(u32 *)ptr) \
+- : [val] "Ir" (val)); \
+- } while (loop); \
++ " stxr %w[loop], %w[ret], %[ptr]\n" \
++ " cbnz %w[loop], 1b" \
++ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
++ [ptr] "+Q"(*(u32 *)ptr) \
++ : [val] "Ir" (val)); \
+ break; \
+ case 8: \
+- do { \
+- asm ("//__per_cpu_" #op "_8\n" \
+- "ldxr %[ret], %[ptr]\n" \
++ asm ("//__per_cpu_" #op "_8\n" \
++ "1: ldxr %[ret], %[ptr]\n" \
+ #asm_op " %[ret], %[ret], %[val]\n" \
+- "stxr %w[loop], %[ret], %[ptr]\n" \
+- : [loop] "=&r" (loop), [ret] "=&r" (ret), \
+- [ptr] "+Q"(*(u64 *)ptr) \
+- : [val] "Ir" (val)); \
+- } while (loop); \
++ " stxr %w[loop], %[ret], %[ptr]\n" \
++ " cbnz %w[loop], 1b" \
++ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
++ [ptr] "+Q"(*(u64 *)ptr) \
++ : [val] "Ir" (val)); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+@@ -150,44 +146,40 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
+
+ switch (size) {
+ case 1:
+- do {
+- asm ("//__percpu_xchg_1\n"
+- "ldxrb %w[ret], %[ptr]\n"
+- "stxrb %w[loop], %w[val], %[ptr]\n"
+- : [loop] "=&r"(loop), [ret] "=&r"(ret),
+- [ptr] "+Q"(*(u8 *)ptr)
+- : [val] "r" (val));
+- } while (loop);
++ asm ("//__percpu_xchg_1\n"
++ "1: ldxrb %w[ret], %[ptr]\n"
++ " stxrb %w[loop], %w[val], %[ptr]\n"
++ " cbnz %w[loop], 1b"
++ : [loop] "=&r"(loop), [ret] "=&r"(ret),
++ [ptr] "+Q"(*(u8 *)ptr)
++ : [val] "r" (val));
+ break;
+ case 2:
+- do {
+- asm ("//__percpu_xchg_2\n"
+- "ldxrh %w[ret], %[ptr]\n"
+- "stxrh %w[loop], %w[val], %[ptr]\n"
+- : [loop] "=&r"(loop), [ret] "=&r"(ret),
+- [ptr] "+Q"(*(u16 *)ptr)
+- : [val] "r" (val));
+- } while (loop);
++ asm ("//__percpu_xchg_2\n"
++ "1: ldxrh %w[ret], %[ptr]\n"
++ " stxrh %w[loop], %w[val], %[ptr]\n"
++ " cbnz %w[loop], 1b"
++ : [loop] "=&r"(loop), [ret] "=&r"(ret),
++ [ptr] "+Q"(*(u16 *)ptr)
++ : [val] "r" (val));
+ break;
+ case 4:
+- do {
+- asm ("//__percpu_xchg_4\n"
+- "ldxr %w[ret], %[ptr]\n"
+- "stxr %w[loop], %w[val], %[ptr]\n"
+- : [loop] "=&r"(loop), [ret] "=&r"(ret),
+- [ptr] "+Q"(*(u32 *)ptr)
+- : [val] "r" (val));
+- } while (loop);
++ asm ("//__percpu_xchg_4\n"
++ "1: ldxr %w[ret], %[ptr]\n"
++ " stxr %w[loop], %w[val], %[ptr]\n"
++ " cbnz %w[loop], 1b"
++ : [loop] "=&r"(loop), [ret] "=&r"(ret),
++ [ptr] "+Q"(*(u32 *)ptr)
++ : [val] "r" (val));
+ break;
+ case 8:
+- do {
+- asm ("//__percpu_xchg_8\n"
+- "ldxr %[ret], %[ptr]\n"
+- "stxr %w[loop], %[val], %[ptr]\n"
+- : [loop] "=&r"(loop), [ret] "=&r"(ret),
+- [ptr] "+Q"(*(u64 *)ptr)
+- : [val] "r" (val));
+- } while (loop);
++ asm ("//__percpu_xchg_8\n"
++ "1: ldxr %[ret], %[ptr]\n"
++ " stxr %w[loop], %[val], %[ptr]\n"
++ " cbnz %w[loop], 1b"
++ : [loop] "=&r"(loop), [ret] "=&r"(ret),
++ [ptr] "+Q"(*(u64 *)ptr)
++ : [val] "r" (val));
+ break;
+ default:
+ BUILD_BUG();
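The arm64 rewrite above exists because a C-level do/while wrapped around an ldxr/stxr pair is fragile: the compiler may schedule spills or reloads between the exclusive load and store, and any intervening memory access can clear the exclusive monitor, so the store-exclusive keeps failing. Folding the retry branch (cbnz) into the asm keeps the whole exclusive sequence opaque to the compiler. A minimal sketch of the same pattern, assuming an aarch64 target (the non-arm64 fallback is a portable stand-in so the file still builds elsewhere):

#include <stdint.h>

/* Load-exclusive, update, store-exclusive and the retry branch all
 * live in one asm block, as in the hunk above, so nothing can be
 * scheduled between ldxr and stxr. */
static inline uint64_t add_return_relaxed(uint64_t *ptr, uint64_t val)
{
#if defined(__aarch64__)
	uint64_t ret;
	uint32_t loop;

	asm volatile(
		"1:	ldxr	%[ret], %[mem]\n"
		"	add	%[ret], %[ret], %[val]\n"
		"	stxr	%w[loop], %[ret], %[mem]\n"
		"	cbnz	%w[loop], 1b"
		: [loop] "=&r" (loop), [ret] "=&r" (ret), [mem] "+Q" (*ptr)
		: [val] "r" (val));
	return ret;
#else
	return __atomic_add_fetch(ptr, val, __ATOMIC_RELAXED);
#endif
}

int main(void)
{
	uint64_t counter = 40;
	return add_return_relaxed(&counter, 2) == 42 ? 0 : 1;
}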
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index b685257926f0..20ceb5edf7b8 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -518,8 +518,9 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
+ b.lt 4f // Skip if no PMU present
+ mrs x0, pmcr_el0 // Disable debug access traps
+ ubfx x0, x0, #11, #5 // to EL2 and allow access to
+- msr mdcr_el2, x0 // all PMU counters from EL1
+ 4:
++ csel x0, xzr, x0, lt // all PMU counters from EL1
++ msr mdcr_el2, x0 // (if they exist)
+
+ /* Stage-2 translation */
+ msr vttbr_el2, xzr
+diff --git a/arch/metag/include/asm/atomic.h b/arch/metag/include/asm/atomic.h
+index 470e365f04ea..8ff0a70865f6 100644
+--- a/arch/metag/include/asm/atomic.h
++++ b/arch/metag/include/asm/atomic.h
+@@ -39,11 +39,10 @@
+ #define atomic_dec(v) atomic_sub(1, (v))
+
+ #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
++#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
+
+ #endif
+
+-#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
+-
+ #include <asm-generic/atomic64.h>
+
+ #endif /* __ASM_METAG_ATOMIC_H */
+diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
+index f6fc6aac5496..b6578611dddb 100644
+--- a/arch/mips/include/asm/ptrace.h
++++ b/arch/mips/include/asm/ptrace.h
+@@ -152,7 +152,7 @@ static inline int is_syscall_success(struct pt_regs *regs)
+
+ static inline long regs_return_value(struct pt_regs *regs)
+ {
+- if (is_syscall_success(regs))
++ if (is_syscall_success(regs) || !user_mode(regs))
+ return regs->regs[2];
+ else
+ return -regs->regs[2];
+diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
+index 090393aa0f20..6c7d78546eee 100644
+--- a/arch/mips/vdso/Makefile
++++ b/arch/mips/vdso/Makefile
+@@ -75,7 +75,7 @@ obj-vdso := $(obj-vdso-y:%.o=$(obj)/%.o)
+ $(obj-vdso): KBUILD_CFLAGS := $(cflags-vdso) $(native-abi)
+ $(obj-vdso): KBUILD_AFLAGS := $(aflags-vdso) $(native-abi)
+
+-$(obj)/vdso.lds: KBUILD_CPPFLAGS := $(native-abi)
++$(obj)/vdso.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) $(native-abi)
+
+ $(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
+ $(call if_changed,vdsold)
+diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
+index 291cee28ccb6..c2c43f714684 100644
+--- a/arch/parisc/include/asm/pgtable.h
++++ b/arch/parisc/include/asm/pgtable.h
+@@ -83,10 +83,10 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
+ printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
+
+ /* This is the size of the initially mapped kernel memory */
+-#ifdef CONFIG_64BIT
+-#define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */
++#if defined(CONFIG_64BIT)
++#define KERNEL_INITIAL_ORDER 26 /* 1<<26 = 64MB */
+ #else
+-#define KERNEL_INITIAL_ORDER 24 /* 1<<24 = 16MB */
++#define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */
+ #endif
+ #define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER)
+
+diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
+index f7ea626e29c9..81d6f6391944 100644
+--- a/arch/parisc/kernel/setup.c
++++ b/arch/parisc/kernel/setup.c
+@@ -38,6 +38,7 @@
+ #include <linux/export.h>
+
+ #include <asm/processor.h>
++#include <asm/sections.h>
+ #include <asm/pdc.h>
+ #include <asm/led.h>
+ #include <asm/machdep.h> /* for pa7300lc_init() proto */
+@@ -140,6 +141,13 @@ void __init setup_arch(char **cmdline_p)
+ #endif
+ printk(KERN_CONT ".\n");
+
++ /*
++ * Check if initial kernel page mappings are sufficient.
++ * panic early if not, else we may access kernel functions
++ * and variables which can't be reached.
++ */
++ if (__pa((unsigned long) &_end) >= KERNEL_INITIAL_SIZE)
++ panic("KERNEL_INITIAL_ORDER too small!");
+
+ pdc_console_init();
+
+diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
+index 308f29081d46..60771df10fde 100644
+--- a/arch/parisc/kernel/vmlinux.lds.S
++++ b/arch/parisc/kernel/vmlinux.lds.S
+@@ -88,8 +88,9 @@ SECTIONS
+ /* Start of data section */
+ _sdata = .;
+
+- RO_DATA_SECTION(8)
+-
++ /* Architecturally we need to keep __gp below 0x1000000 and thus
++ * in front of RO_DATA_SECTION() which stores lots of tracepoint
++ * and ftrace symbols. */
+ #ifdef CONFIG_64BIT
+ . = ALIGN(16);
+ /* Linkage tables */
+@@ -104,6 +105,8 @@ SECTIONS
+ }
+ #endif
+
++ RO_DATA_SECTION(8)
++
+ /* unwind info */
+ .PARISC.unwind : {
+ __start___unwind = .;
+diff --git a/arch/powerpc/kernel/vdso64/datapage.S b/arch/powerpc/kernel/vdso64/datapage.S
+index 2f01c4a0d8a0..7612eeb31da1 100644
+--- a/arch/powerpc/kernel/vdso64/datapage.S
++++ b/arch/powerpc/kernel/vdso64/datapage.S
+@@ -59,7 +59,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map)
+ bl V_LOCAL_FUNC(__get_datapage)
+ mtlr r12
+ addi r3,r3,CFG_SYSCALL_MAP64
+- cmpli cr0,r4,0
++ cmpldi cr0,r4,0
+ crclr cr0*4+so
+ beqlr
+ li r0,__NR_syscalls
+diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S
+index a76b4af37ef2..382021324883 100644
+--- a/arch/powerpc/kernel/vdso64/gettimeofday.S
++++ b/arch/powerpc/kernel/vdso64/gettimeofday.S
+@@ -145,7 +145,7 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
+ bne cr0,99f
+
+ li r3,0
+- cmpli cr0,r4,0
++ cmpldi cr0,r4,0
+ crclr cr0*4+so
+ beqlr
+ lis r5,CLOCK_REALTIME_RES@h
+diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
+index f09899e35991..7b22624f332c 100644
+--- a/arch/powerpc/lib/copyuser_64.S
++++ b/arch/powerpc/lib/copyuser_64.S
+@@ -359,6 +359,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
+ addi r3,r3,8
+ 171:
+ 177:
++179:
+ addi r3,r3,8
+ 370:
+ 372:
+@@ -373,7 +374,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
+ 173:
+ 174:
+ 175:
+-179:
+ 181:
+ 184:
+ 186:
+diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
+index 6527882ce05e..ddfd2740a1b5 100644
+--- a/arch/powerpc/mm/copro_fault.c
++++ b/arch/powerpc/mm/copro_fault.c
+@@ -106,6 +106,8 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
+ switch (REGION_ID(ea)) {
+ case USER_REGION_ID:
+ pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
++ if (mm == NULL)
++ return 1;
+ psize = get_slice_psize(mm, ea);
+ ssize = user_segment_size(ea);
+ vsid = get_vsid(mm->context.id, ea, ssize);
+diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
+index 2ba602591a20..ba0cae69a396 100644
+--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
++++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
+@@ -1163,7 +1163,7 @@ static void pnv_eeh_get_and_dump_hub_diag(struct pci_controller *hose)
+ return;
+ }
+
+- switch (data->type) {
++ switch (be16_to_cpu(data->type)) {
+ case OPAL_P7IOC_DIAG_TYPE_RGC:
+ pr_info("P7IOC diag-data for RGC\n\n");
+ pnv_eeh_dump_hub_diag_common(data);
+@@ -1395,7 +1395,7 @@ static int pnv_eeh_next_error(struct eeh_pe **pe)
+
+ /* Try best to clear it */
+ opal_pci_eeh_freeze_clear(phb->opal_id,
+- frozen_pe_no,
++ be64_to_cpu(frozen_pe_no),
+ OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
+ ret = EEH_NEXT_ERR_NONE;
+ } else if ((*pe)->state & EEH_PE_ISOLATED ||
+diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
+index ad8c3f4a5e0b..dd5e0f3b1b5d 100644
+--- a/arch/powerpc/platforms/powernv/pci.c
++++ b/arch/powerpc/platforms/powernv/pci.c
+@@ -197,8 +197,8 @@ static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
+ be64_to_cpu(data->dma1ErrorLog1));
+
+ for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
+- if ((data->pestA[i] >> 63) == 0 &&
+- (data->pestB[i] >> 63) == 0)
++ if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 &&
++ (be64_to_cpu(data->pestB[i]) >> 63) == 0)
+ continue;
+
+ pr_info("PE[%3d] A/B: %016llx %016llx\n",
+diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
+index b7a67e3d2201..3ae43282460e 100644
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -406,7 +406,7 @@ static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
+ unsigned long *vpn, int count,
+ int psize, int ssize)
+ {
+- unsigned long param[8];
++ unsigned long param[PLPAR_HCALL9_BUFSIZE];
+ int i = 0, pix = 0, rc;
+ unsigned long flags = 0;
+ int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+@@ -523,7 +523,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
+ unsigned long flags = 0;
+ struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
+ int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+- unsigned long param[9];
++ unsigned long param[PLPAR_HCALL9_BUFSIZE];
+ unsigned long hash, index, shift, hidx, slot;
+ real_pte_t pte;
+ int psize, ssize;
+diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
+index a2e6ef32e054..0a2031618f7f 100644
+--- a/arch/s390/include/asm/tlbflush.h
++++ b/arch/s390/include/asm/tlbflush.h
+@@ -81,7 +81,8 @@ static inline void __tlb_flush_full(struct mm_struct *mm)
+ }
+
+ /*
+- * Flush TLB entries for a specific ASCE on all CPUs.
++ * Flush TLB entries for a specific ASCE on all CPUs. Should never be used
++ * when more than one asce (e.g. gmap) ran on this mm.
+ */
+ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
+ {
+diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
+index 471a370a527b..8345ae1f117d 100644
+--- a/arch/s390/mm/pgtable.c
++++ b/arch/s390/mm/pgtable.c
+@@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(gmap_alloc);
+ static void gmap_flush_tlb(struct gmap *gmap)
+ {
+ if (MACHINE_HAS_IDTE)
+- __tlb_flush_asce(gmap->mm, gmap->asce);
++ __tlb_flush_idte(gmap->asce);
+ else
+ __tlb_flush_global();
+ }
+@@ -205,7 +205,7 @@ void gmap_free(struct gmap *gmap)
+
+ /* Flush tlb. */
+ if (MACHINE_HAS_IDTE)
+- __tlb_flush_asce(gmap->mm, gmap->asce);
++ __tlb_flush_idte(gmap->asce);
+ else
+ __tlb_flush_global();
+
+diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
+index 38b3ead7222d..52a2526c3fbe 100644
+--- a/arch/x86/kernel/e820.c
++++ b/arch/x86/kernel/e820.c
+@@ -347,7 +347,7 @@ int __init sanitize_e820_map(struct e820entry *biosmap, int max_nr_map,
+ * continue building up new bios map based on this
+ * information
+ */
+- if (current_type != last_type) {
++ if (current_type != last_type || current_type == E820_PRAM) {
+ if (last_type != 0) {
+ new_bios[new_bios_entry].size =
+ change_point[chgidx]->addr - last_addr;
+diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
+index 88d0a92d3f94..3aab53f8cad2 100644
+--- a/arch/x86/kvm/ioapic.c
++++ b/arch/x86/kvm/ioapic.c
+@@ -580,7 +580,7 @@ static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
+ ioapic->irr = 0;
+ ioapic->irr_delivered = 0;
+ ioapic->id = 0;
+- memset(ioapic->irq_eoi, 0x00, IOAPIC_NUM_PINS);
++ memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi));
+ rtc_irq_eoi_tracking_reset(ioapic);
+ }
+
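The one-line KVM ioapic fix above is the classic element-count-as-byte-count memset bug: IOAPIC_NUM_PINS counts pins, but irq_eoi is an array of int, so passing the pin count as the length cleared only a quarter of the array. A runnable illustration (24 is the real value of the kernel constant; the int element type matches the 4.4 struct):

#include <stdio.h>
#include <string.h>

#define IOAPIC_NUM_PINS 24

int main(void)
{
	int irq_eoi[IOAPIC_NUM_PINS];

	memset(irq_eoi, 0xff, sizeof(irq_eoi));

	/* buggy form: a pin *count* passed where memset wants a *byte*
	 * count, so only 24 of the array's 96 bytes are cleared */
	memset(irq_eoi, 0x00, IOAPIC_NUM_PINS);
	printf("count-as-bytes: irq_eoi[10] = 0x%x\n", irq_eoi[10]);

	/* fixed form from the hunk above: sizeof covers the whole array */
	memset(irq_eoi, 0x00, sizeof(irq_eoi));
	printf("sizeof:         irq_eoi[10] = 0x%x\n", irq_eoi[10]);
	return 0;
}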
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index 5a37188b559f..9d359e05fad7 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -1331,10 +1331,8 @@ int blkcg_policy_register(struct blkcg_policy *pol)
+ struct blkcg_policy_data *cpd;
+
+ cpd = pol->cpd_alloc_fn(GFP_KERNEL);
+- if (!cpd) {
+- mutex_unlock(&blkcg_pol_mutex);
++ if (!cpd)
+ goto err_free_cpds;
+- }
+
+ blkcg->cpd[pol->plid] = cpd;
+ cpd->blkcg = blkcg;
+diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c
+index 758acabf2d81..8f3056cd0399 100644
+--- a/crypto/asymmetric_keys/pkcs7_parser.c
++++ b/crypto/asymmetric_keys/pkcs7_parser.c
+@@ -547,9 +547,7 @@ int pkcs7_sig_note_set_of_authattrs(void *context, size_t hdrlen,
+ struct pkcs7_signed_info *sinfo = ctx->sinfo;
+
+ if (!test_bit(sinfo_has_content_type, &sinfo->aa_set) ||
+- !test_bit(sinfo_has_message_digest, &sinfo->aa_set) ||
+- (ctx->msg->data_type == OID_msIndirectData &&
+- !test_bit(sinfo_has_ms_opus_info, &sinfo->aa_set))) {
++ !test_bit(sinfo_has_message_digest, &sinfo->aa_set)) {
+ pr_warn("Missing required AuthAttr\n");
+ return -EBADMSG;
+ }
+diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
+index 5230e8449d30..c097f477c74c 100644
+--- a/drivers/acpi/nfit.c
++++ b/drivers/acpi/nfit.c
+@@ -1806,6 +1806,9 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
+
+ dev_dbg(dev, "%s: event: %d\n", __func__, event);
+
++ if (event != NFIT_NOTIFY_UPDATE)
++ return;
++
+ device_lock(dev);
+ if (!dev->driver) {
+ /* dev->driver may be null if we're being removed */
+diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h
+index 3d549a383659..13d6ec1ff055 100644
+--- a/drivers/acpi/nfit.h
++++ b/drivers/acpi/nfit.h
+@@ -45,6 +45,10 @@ enum {
+ ND_BLK_DCR_LATCH = 2,
+ };
+
++enum nfit_root_notifiers {
++ NFIT_NOTIFY_UPDATE = 0x80,
++};
++
+ struct nfit_spa {
+ struct acpi_nfit_system_address *spa;
+ struct list_head list;
+diff --git a/drivers/base/platform.c b/drivers/base/platform.c
+index 176b59f5bc47..ba66330cea67 100644
+--- a/drivers/base/platform.c
++++ b/drivers/base/platform.c
+@@ -96,7 +96,7 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
+ int ret;
+
+ ret = of_irq_get(dev->dev.of_node, num);
+- if (ret >= 0 || ret == -EPROBE_DEFER)
++ if (ret > 0 || ret == -EPROBE_DEFER)
+ return ret;
+ }
+
+@@ -154,7 +154,7 @@ int platform_get_irq_byname(struct platform_device *dev, const char *name)
+ int ret;
+
+ ret = of_irq_get_byname(dev->dev.of_node, name);
+- if (ret >= 0 || ret == -EPROBE_DEFER)
++ if (ret > 0 || ret == -EPROBE_DEFER)
+ return ret;
+ }
+
+diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
+index c1935081d34a..aab64205d866 100644
+--- a/drivers/clk/imx/clk-imx6q.c
++++ b/drivers/clk/imx/clk-imx6q.c
+@@ -550,6 +550,24 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
+ if (IS_ENABLED(CONFIG_PCI_IMX6))
+ clk_set_parent(clk[IMX6QDL_CLK_LVDS1_SEL], clk[IMX6QDL_CLK_SATA_REF_100M]);
+
++ /*
++ * Initialize the GPU clock muxes, so that the maximum specified clock
++ * rates for the respective SoC are not exceeded.
++ */
++ if (clk_on_imx6dl()) {
++ clk_set_parent(clk[IMX6QDL_CLK_GPU3D_CORE_SEL],
++ clk[IMX6QDL_CLK_PLL2_PFD1_594M]);
++ clk_set_parent(clk[IMX6QDL_CLK_GPU2D_CORE_SEL],
++ clk[IMX6QDL_CLK_PLL2_PFD1_594M]);
++ } else if (clk_on_imx6q()) {
++ clk_set_parent(clk[IMX6QDL_CLK_GPU3D_CORE_SEL],
++ clk[IMX6QDL_CLK_MMDC_CH0_AXI]);
++ clk_set_parent(clk[IMX6QDL_CLK_GPU3D_SHADER_SEL],
++ clk[IMX6QDL_CLK_PLL2_PFD1_594M]);
++ clk_set_parent(clk[IMX6QDL_CLK_GPU2D_CORE_SEL],
++ clk[IMX6QDL_CLK_PLL3_USB_OTG]);
++ }
++
+ imx_register_uart_clocks(uart_clks);
+ }
+ CLK_OF_DECLARE(imx6q, "fsl,imx6q-ccm", imx6q_clocks_init);
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index 6e80e4298274..7ff8b15a3422 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -285,14 +285,14 @@ static void intel_pstate_hwp_set(void)
+ int min, hw_min, max, hw_max, cpu, range, adj_range;
+ u64 value, cap;
+
+- rdmsrl(MSR_HWP_CAPABILITIES, cap);
+- hw_min = HWP_LOWEST_PERF(cap);
+- hw_max = HWP_HIGHEST_PERF(cap);
+- range = hw_max - hw_min;
+-
+ get_online_cpus();
+
+ for_each_online_cpu(cpu) {
++ rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
++ hw_min = HWP_LOWEST_PERF(cap);
++ hw_max = HWP_HIGHEST_PERF(cap);
++ range = hw_max - hw_min;
++
+ rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
+ adj_range = limits->min_perf_pct * range / 100;
+ min = hw_min + adj_range;
+diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
+index 48ef368347ab..9e02cb6afb0b 100644
+--- a/drivers/gpio/gpio-mpc8xxx.c
++++ b/drivers/gpio/gpio-mpc8xxx.c
+@@ -329,7 +329,7 @@ static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int irq,
+ irq_hw_number_t hwirq)
+ {
+ irq_set_chip_data(irq, h->host_data);
+- irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_level_irq);
++ irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_edge_irq);
+
+ return 0;
+ }
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index be5b399da5d3..43482ae1e049 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1163,6 +1163,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"),
+ },
+ },
++ {
++ /* Fujitsu H760 also has a middle button */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
++ },
++ },
+ #endif
+ { }
+ };
+@@ -1507,10 +1514,10 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
+ },
+ },
+ {
+- /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
++ /* Fujitsu H760 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
+ },
+ },
+ {
+@@ -1521,6 +1528,20 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
+ },
+ },
+ {
++ /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"),
++ },
++ },
++ {
++ /* Fujitsu LIFEBOOK E556 does not work with crc_enabled == 0 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E556"),
++ },
++ },
++ {
+ /* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+diff --git a/drivers/input/serio/i8042-io.h b/drivers/input/serio/i8042-io.h
+index a5eed2ade53d..34da81c006b6 100644
+--- a/drivers/input/serio/i8042-io.h
++++ b/drivers/input/serio/i8042-io.h
+@@ -81,7 +81,7 @@ static inline int i8042_platform_init(void)
+ return -EBUSY;
+ #endif
+
+- i8042_reset = 1;
++ i8042_reset = I8042_RESET_ALWAYS;
+ return 0;
+ }
+
+diff --git a/drivers/input/serio/i8042-ip22io.h b/drivers/input/serio/i8042-ip22io.h
+index ee1ad27d6ed0..08a1c10a1448 100644
+--- a/drivers/input/serio/i8042-ip22io.h
++++ b/drivers/input/serio/i8042-ip22io.h
+@@ -61,7 +61,7 @@ static inline int i8042_platform_init(void)
+ return -EBUSY;
+ #endif
+
+- i8042_reset = 1;
++ i8042_reset = I8042_RESET_ALWAYS;
+
+ return 0;
+ }
+diff --git a/drivers/input/serio/i8042-ppcio.h b/drivers/input/serio/i8042-ppcio.h
+index f708c75d16f1..1aabea43329e 100644
+--- a/drivers/input/serio/i8042-ppcio.h
++++ b/drivers/input/serio/i8042-ppcio.h
+@@ -44,7 +44,7 @@ static inline void i8042_write_command(int val)
+
+ static inline int i8042_platform_init(void)
+ {
+- i8042_reset = 1;
++ i8042_reset = I8042_RESET_ALWAYS;
+ return 0;
+ }
+
+diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
+index afcd1c1a05b2..6231d63860ee 100644
+--- a/drivers/input/serio/i8042-sparcio.h
++++ b/drivers/input/serio/i8042-sparcio.h
+@@ -130,7 +130,7 @@ static int __init i8042_platform_init(void)
+ }
+ }
+
+- i8042_reset = 1;
++ i8042_reset = I8042_RESET_ALWAYS;
+
+ return 0;
+ }
+diff --git a/drivers/input/serio/i8042-unicore32io.h b/drivers/input/serio/i8042-unicore32io.h
+index 73f5cc124a36..455747552f85 100644
+--- a/drivers/input/serio/i8042-unicore32io.h
++++ b/drivers/input/serio/i8042-unicore32io.h
+@@ -61,7 +61,7 @@ static inline int i8042_platform_init(void)
+ if (!request_mem_region(I8042_REGION_START, I8042_REGION_SIZE, "i8042"))
+ return -EBUSY;
+
+- i8042_reset = 1;
++ i8042_reset = I8042_RESET_ALWAYS;
+ return 0;
+ }
+
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index 68f5f4a0f1e7..f4bfb4b2d50a 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -510,6 +510,90 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
+ { }
+ };
+
++/*
++ * On some Asus laptops, just running self tests cause problems.
++ */
++static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
++ {
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "A455LD"),
++ },
++ },
++ {
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "K401LB"),
++ },
++ },
++ {
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "K501LB"),
++ },
++ },
++ {
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "K501LX"),
++ },
++ },
++ {
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "R409L"),
++ },
++ },
++ {
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "V502LX"),
++ },
++ },
++ {
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "X302LA"),
++ },
++ },
++ {
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"),
++ },
++ },
++ {
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "X450LD"),
++ },
++ },
++ {
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "X455LAB"),
++ },
++ },
++ {
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "X455LDB"),
++ },
++ },
++ {
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "X455LF"),
++ },
++ },
++ {
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Z450LA"),
++ },
++ },
++ { }
++};
+ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
+ {
+ /* MSI Wind U-100 */
+@@ -1072,12 +1156,18 @@ static int __init i8042_platform_init(void)
+ return retval;
+
+ #if defined(__ia64__)
+- i8042_reset = true;
++ i8042_reset = I8042_RESET_ALWAYS;
+ #endif
+
+ #ifdef CONFIG_X86
+- if (dmi_check_system(i8042_dmi_reset_table))
+- i8042_reset = true;
++ /* Honor module parameter when value is not default */
++ if (i8042_reset == I8042_RESET_DEFAULT) {
++ if (dmi_check_system(i8042_dmi_reset_table))
++ i8042_reset = I8042_RESET_ALWAYS;
++
++ if (dmi_check_system(i8042_dmi_noselftest_table))
++ i8042_reset = I8042_RESET_NEVER;
++ }
+
+ if (dmi_check_system(i8042_dmi_noloop_table))
+ i8042_noloop = true;
+diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
+index 405252a884dd..89abfdb539ac 100644
+--- a/drivers/input/serio/i8042.c
++++ b/drivers/input/serio/i8042.c
+@@ -48,9 +48,39 @@ static bool i8042_unlock;
+ module_param_named(unlock, i8042_unlock, bool, 0);
+ MODULE_PARM_DESC(unlock, "Ignore keyboard lock.");
+
+-static bool i8042_reset;
+-module_param_named(reset, i8042_reset, bool, 0);
+-MODULE_PARM_DESC(reset, "Reset controller during init and cleanup.");
++enum i8042_controller_reset_mode {
++ I8042_RESET_NEVER,
++ I8042_RESET_ALWAYS,
++ I8042_RESET_ON_S2RAM,
++#define I8042_RESET_DEFAULT I8042_RESET_ON_S2RAM
++};
++static enum i8042_controller_reset_mode i8042_reset = I8042_RESET_DEFAULT;
++static int i8042_set_reset(const char *val, const struct kernel_param *kp)
++{
++ enum i8042_controller_reset_mode *arg = kp->arg;
++ int error;
++ bool reset;
++
++ if (val) {
++ error = kstrtobool(val, &reset);
++ if (error)
++ return error;
++ } else {
++ reset = true;
++ }
++
++ *arg = reset ? I8042_RESET_ALWAYS : I8042_RESET_NEVER;
++ return 0;
++}
++
++static const struct kernel_param_ops param_ops_reset_param = {
++ .flags = KERNEL_PARAM_OPS_FL_NOARG,
++ .set = i8042_set_reset,
++};
++#define param_check_reset_param(name, p) \
++ __param_check(name, p, enum i8042_controller_reset_mode)
++module_param_named(reset, i8042_reset, reset_param, 0);
++MODULE_PARM_DESC(reset, "Reset controller on resume, cleanup or both");
+
+ static bool i8042_direct;
+ module_param_named(direct, i8042_direct, bool, 0);
+@@ -1019,7 +1049,7 @@ static int i8042_controller_init(void)
+ * Reset the controller and reset CRT to the original value set by BIOS.
+ */
+
+-static void i8042_controller_reset(bool force_reset)
++static void i8042_controller_reset(bool s2r_wants_reset)
+ {
+ i8042_flush();
+
+@@ -1044,8 +1074,10 @@ static void i8042_controller_reset(bool force_reset)
+ * Reset the controller if requested.
+ */
+
+- if (i8042_reset || force_reset)
++ if (i8042_reset == I8042_RESET_ALWAYS ||
++ (i8042_reset == I8042_RESET_ON_S2RAM && s2r_wants_reset)) {
+ i8042_controller_selftest();
++ }
+
+ /*
+ * Restore the original control register setting.
+@@ -1110,7 +1142,7 @@ static void i8042_dritek_enable(void)
+ * before suspending.
+ */
+
+-static int i8042_controller_resume(bool force_reset)
++static int i8042_controller_resume(bool s2r_wants_reset)
+ {
+ int error;
+
+@@ -1118,7 +1150,8 @@ static int i8042_controller_resume(bool force_reset)
+ if (error)
+ return error;
+
+- if (i8042_reset || force_reset) {
++ if (i8042_reset == I8042_RESET_ALWAYS ||
++ (i8042_reset == I8042_RESET_ON_S2RAM && s2r_wants_reset)) {
+ error = i8042_controller_selftest();
+ if (error)
+ return error;
+@@ -1195,7 +1228,7 @@ static int i8042_pm_resume_noirq(struct device *dev)
+
+ static int i8042_pm_resume(struct device *dev)
+ {
+- bool force_reset;
++ bool want_reset;
+ int i;
+
+ for (i = 0; i < I8042_NUM_PORTS; i++) {
+@@ -1218,9 +1251,9 @@ static int i8042_pm_resume(struct device *dev)
+ * off control to the platform firmware, otherwise we can simply restore
+ * the mode.
+ */
+- force_reset = pm_resume_via_firmware();
++ want_reset = pm_resume_via_firmware();
+
+- return i8042_controller_resume(force_reset);
++ return i8042_controller_resume(want_reset);
+ }
+
+ static int i8042_pm_thaw(struct device *dev)
+@@ -1482,7 +1515,7 @@ static int __init i8042_probe(struct platform_device *dev)
+
+ i8042_platform_device = dev;
+
+- if (i8042_reset) {
++ if (i8042_reset == I8042_RESET_ALWAYS) {
+ error = i8042_controller_selftest();
+ if (error)
+ return error;
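The i8042 rework above turns the old boolean reset parameter into a three-way mode (never, always, or only on suspend-to-RAM) while keeping the existing i8042.reset and i8042.reset=1/0 spellings working through a custom kernel_param_ops. A simplified userspace sketch of the accepted values; the in-kernel version uses kstrtobool() and rejects unparseable input with an error instead of falling back to the default:

#include <stdio.h>

enum reset_mode { RESET_NEVER, RESET_ALWAYS, RESET_ON_S2RAM };
#define RESET_DEFAULT RESET_ON_S2RAM

static enum reset_mode parse_reset(const char *val)
{
	if (!val)
		return RESET_ALWAYS;	/* bare "i8042.reset" flag */
	switch (val[0]) {
	case '1': case 'Y': case 'y':
		return RESET_ALWAYS;
	case '0': case 'N': case 'n':
		return RESET_NEVER;
	default:
		return RESET_DEFAULT;	/* sketch only: kernel errors out here */
	}
}

int main(void)
{
	printf("reset=1 -> %d, reset=n -> %d, unset default -> %d\n",
	       parse_reset("1"), parse_reset("n"), RESET_DEFAULT);
	return 0;
}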
+diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
+index 44aa57edf207..e33c729b9f48 100644
+--- a/drivers/irqchip/irq-gic-v3.c
++++ b/drivers/irqchip/irq-gic-v3.c
+@@ -142,7 +142,7 @@ static void gic_enable_redist(bool enable)
+ return; /* No PM support in this redistributor */
+ }
+
+- while (count--) {
++ while (--count) {
+ val = readl_relaxed(rbase + GICR_WAKER);
+ if (enable ^ (val & GICR_WAKER_ChildrenAsleep))
+ break;
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 51eda7235e32..5cac11d7a876 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -112,8 +112,7 @@ struct iv_tcw_private {
+ * and encrypts / decrypts at the same time.
+ */
+ enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
+- DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
+- DM_CRYPT_EXIT_THREAD};
++ DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
+
+ /*
+ * The fields in here must be read only after initialization.
+@@ -1204,18 +1203,20 @@ continue_locked:
+ if (!RB_EMPTY_ROOT(&cc->write_tree))
+ goto pop_from_list;
+
+- if (unlikely(test_bit(DM_CRYPT_EXIT_THREAD, &cc->flags))) {
+- spin_unlock_irq(&cc->write_thread_wait.lock);
+- break;
+- }
+-
+- __set_current_state(TASK_INTERRUPTIBLE);
++ set_current_state(TASK_INTERRUPTIBLE);
+ __add_wait_queue(&cc->write_thread_wait, &wait);
+
+ spin_unlock_irq(&cc->write_thread_wait.lock);
+
++ if (unlikely(kthread_should_stop())) {
++ set_task_state(current, TASK_RUNNING);
++ remove_wait_queue(&cc->write_thread_wait, &wait);
++ break;
++ }
++
+ schedule();
+
++ set_task_state(current, TASK_RUNNING);
+ spin_lock_irq(&cc->write_thread_wait.lock);
+ __remove_wait_queue(&cc->write_thread_wait, &wait);
+ goto continue_locked;
+@@ -1530,13 +1531,8 @@ static void crypt_dtr(struct dm_target *ti)
+ if (!cc)
+ return;
+
+- if (cc->write_thread) {
+- spin_lock_irq(&cc->write_thread_wait.lock);
+- set_bit(DM_CRYPT_EXIT_THREAD, &cc->flags);
+- wake_up_locked(&cc->write_thread_wait);
+- spin_unlock_irq(&cc->write_thread_wait.lock);
++ if (cc->write_thread)
+ kthread_stop(cc->write_thread);
+- }
+
+ if (cc->io_queue)
+ destroy_workqueue(cc->io_queue);
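The dm-crypt change above drops the private DM_CRYPT_EXIT_THREAD flag in favor of the stock kthread_stop()/kthread_should_stop() protocol, and it checks for a stop request only after the task state is set and the thread is on the waitqueue, so a stop arriving just before schedule() still wakes the thread instead of leaving it asleep forever. The same lost-wakeup discipline in a runnable pthread analogue (an illustration of the locking pattern, not the dm-crypt code itself):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool stop_requested;

static void *writer_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	/* the stop flag is re-checked under the same lock that guards
	 * the wait, so a stop requested between "no work" and the sleep
	 * cannot be lost */
	while (!stop_requested) {
		/* ... pop and process queued work here ... */
		pthread_cond_wait(&cond, &lock);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, writer_thread, NULL);
	pthread_mutex_lock(&lock);
	stop_requested = true;		/* analogue of kthread_stop() */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	puts("thread stopped cleanly");
	return 0;
}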
+diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
+index cfa29f574c2a..5b2ef966012b 100644
+--- a/drivers/md/dm-mpath.c
++++ b/drivers/md/dm-mpath.c
+@@ -1220,10 +1220,10 @@ static void activate_path(struct work_struct *work)
+ {
+ struct pgpath *pgpath =
+ container_of(work, struct pgpath, activate_path.work);
++ struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
+
+- if (pgpath->is_active)
+- scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
+- pg_init_done, pgpath);
++ if (pgpath->is_active && !blk_queue_dying(q))
++ scsi_dh_activate(q, pg_init_done, pgpath);
+ else
+ pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
+ }
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index a42729ebf272..84aa8b1d0480 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -2869,6 +2869,7 @@ EXPORT_SYMBOL_GPL(dm_device_name);
+
+ static void __dm_destroy(struct mapped_device *md, bool wait)
+ {
++ struct request_queue *q = dm_get_md_queue(md);
+ struct dm_table *map;
+ int srcu_idx;
+
+@@ -2879,6 +2880,10 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
+ set_bit(DMF_FREEING, &md->flags);
+ spin_unlock(&_minor_lock);
+
++ spin_lock_irq(q->queue_lock);
++ queue_flag_set(QUEUE_FLAG_DYING, q);
++ spin_unlock_irq(q->queue_lock);
++
+ if (dm_request_based(md) && md->kworker_task)
+ flush_kthread_worker(&md->kworker);
+
+@@ -3245,10 +3250,11 @@ static int __dm_resume(struct mapped_device *md, struct dm_table *map)
+
+ int dm_resume(struct mapped_device *md)
+ {
+- int r = -EINVAL;
++ int r;
+ struct dm_table *map = NULL;
+
+ retry:
++ r = -EINVAL;
+ mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
+
+ if (!dm_suspended_md(md))
+@@ -3272,8 +3278,6 @@ retry:
+ goto out;
+
+ clear_bit(DMF_SUSPENDED, &md->flags);
+-
+- r = 0;
+ out:
+ mutex_unlock(&md->suspend_lock);
+
+diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
+index cfc005ee11d8..7fc72de2434c 100644
+--- a/drivers/media/dvb-frontends/mb86a20s.c
++++ b/drivers/media/dvb-frontends/mb86a20s.c
+@@ -71,25 +71,27 @@ static struct regdata mb86a20s_init1[] = {
+ };
+
+ static struct regdata mb86a20s_init2[] = {
+- { 0x28, 0x22 }, { 0x29, 0x00 }, { 0x2a, 0x1f }, { 0x2b, 0xf0 },
++ { 0x50, 0xd1 }, { 0x51, 0x22 },
++ { 0x39, 0x01 },
++ { 0x71, 0x00 },
+ { 0x3b, 0x21 },
+- { 0x3c, 0x38 },
++ { 0x3c, 0x3a },
+ { 0x01, 0x0d },
+- { 0x04, 0x08 }, { 0x05, 0x03 },
++ { 0x04, 0x08 }, { 0x05, 0x05 },
+ { 0x04, 0x0e }, { 0x05, 0x00 },
+- { 0x04, 0x0f }, { 0x05, 0x37 },
+- { 0x04, 0x0b }, { 0x05, 0x78 },
++ { 0x04, 0x0f }, { 0x05, 0x14 },
++ { 0x04, 0x0b }, { 0x05, 0x8c },
+ { 0x04, 0x00 }, { 0x05, 0x00 },
+- { 0x04, 0x01 }, { 0x05, 0x1e },
+- { 0x04, 0x02 }, { 0x05, 0x07 },
+- { 0x04, 0x03 }, { 0x05, 0xd0 },
++ { 0x04, 0x01 }, { 0x05, 0x07 },
++ { 0x04, 0x02 }, { 0x05, 0x0f },
++ { 0x04, 0x03 }, { 0x05, 0xa0 },
+ { 0x04, 0x09 }, { 0x05, 0x00 },
+ { 0x04, 0x0a }, { 0x05, 0xff },
+- { 0x04, 0x27 }, { 0x05, 0x00 },
++ { 0x04, 0x27 }, { 0x05, 0x64 },
+ { 0x04, 0x28 }, { 0x05, 0x00 },
+- { 0x04, 0x1e }, { 0x05, 0x00 },
+- { 0x04, 0x29 }, { 0x05, 0x64 },
+- { 0x04, 0x32 }, { 0x05, 0x02 },
++ { 0x04, 0x1e }, { 0x05, 0xff },
++ { 0x04, 0x29 }, { 0x05, 0x0a },
++ { 0x04, 0x32 }, { 0x05, 0x0a },
+ { 0x04, 0x14 }, { 0x05, 0x02 },
+ { 0x04, 0x04 }, { 0x05, 0x00 },
+ { 0x04, 0x05 }, { 0x05, 0x22 },
+@@ -97,8 +99,6 @@ static struct regdata mb86a20s_init2[] = {
+ { 0x04, 0x07 }, { 0x05, 0xd8 },
+ { 0x04, 0x12 }, { 0x05, 0x00 },
+ { 0x04, 0x13 }, { 0x05, 0xff },
+- { 0x04, 0x15 }, { 0x05, 0x4e },
+- { 0x04, 0x16 }, { 0x05, 0x20 },
+
+ /*
+ * On this demod, when the bit count reaches the count below,
+@@ -152,42 +152,36 @@ static struct regdata mb86a20s_init2[] = {
+ { 0x50, 0x51 }, { 0x51, 0x04 }, /* MER symbol 4 */
+ { 0x45, 0x04 }, /* CN symbol 4 */
+ { 0x48, 0x04 }, /* CN manual mode */
+-
++ { 0x50, 0xd5 }, { 0x51, 0x01 },
+ { 0x50, 0xd6 }, { 0x51, 0x1f },
+ { 0x50, 0xd2 }, { 0x51, 0x03 },
+- { 0x50, 0xd7 }, { 0x51, 0xbf },
+- { 0x28, 0x74 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0xff },
+- { 0x28, 0x46 }, { 0x29, 0x00 }, { 0x2a, 0x1a }, { 0x2b, 0x0c },
+-
+- { 0x04, 0x40 }, { 0x05, 0x00 },
+- { 0x28, 0x00 }, { 0x2b, 0x08 },
+- { 0x28, 0x05 }, { 0x2b, 0x00 },
++ { 0x50, 0xd7 }, { 0x51, 0x3f },
+ { 0x1c, 0x01 },
+- { 0x28, 0x06 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x1f },
+- { 0x28, 0x07 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x18 },
+- { 0x28, 0x08 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x12 },
+- { 0x28, 0x09 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x30 },
+- { 0x28, 0x0a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x37 },
+- { 0x28, 0x0b }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x02 },
+- { 0x28, 0x0c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x09 },
+- { 0x28, 0x0d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x06 },
+- { 0x28, 0x0e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x7b },
+- { 0x28, 0x0f }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x76 },
+- { 0x28, 0x10 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x7d },
+- { 0x28, 0x11 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x08 },
+- { 0x28, 0x12 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0b },
+- { 0x28, 0x13 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x00 },
+- { 0x28, 0x14 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xf2 },
+- { 0x28, 0x15 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xf3 },
+- { 0x28, 0x16 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x05 },
+- { 0x28, 0x17 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x16 },
+- { 0x28, 0x18 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0f },
+- { 0x28, 0x19 }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xef },
+- { 0x28, 0x1a }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xd8 },
+- { 0x28, 0x1b }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xf1 },
+- { 0x28, 0x1c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x3d },
+- { 0x28, 0x1d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x94 },
+- { 0x28, 0x1e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0xba },
++ { 0x28, 0x06 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x03 },
++ { 0x28, 0x07 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0d },
++ { 0x28, 0x08 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x02 },
++ { 0x28, 0x09 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x01 },
++ { 0x28, 0x0a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x21 },
++ { 0x28, 0x0b }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x29 },
++ { 0x28, 0x0c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x16 },
++ { 0x28, 0x0d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x31 },
++ { 0x28, 0x0e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0e },
++ { 0x28, 0x0f }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x4e },
++ { 0x28, 0x10 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x46 },
++ { 0x28, 0x11 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0f },
++ { 0x28, 0x12 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x56 },
++ { 0x28, 0x13 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x35 },
++ { 0x28, 0x14 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xbe },
++ { 0x28, 0x15 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0x84 },
++ { 0x28, 0x16 }, { 0x29, 0x00 }, { 0x2a, 0x03 }, { 0x2b, 0xee },
++ { 0x28, 0x17 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x98 },
++ { 0x28, 0x18 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x9f },
++ { 0x28, 0x19 }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xb2 },
++ { 0x28, 0x1a }, { 0x29, 0x00 }, { 0x2a, 0x06 }, { 0x2b, 0xc2 },
++ { 0x28, 0x1b }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0x4a },
++ { 0x28, 0x1c }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xbc },
++ { 0x28, 0x1d }, { 0x29, 0x00 }, { 0x2a, 0x04 }, { 0x2b, 0xba },
++ { 0x28, 0x1e }, { 0x29, 0x00 }, { 0x2a, 0x06 }, { 0x2b, 0x14 },
+ { 0x50, 0x1e }, { 0x51, 0x5d },
+ { 0x50, 0x22 }, { 0x51, 0x00 },
+ { 0x50, 0x23 }, { 0x51, 0xc8 },
+@@ -196,9 +190,7 @@ static struct regdata mb86a20s_init2[] = {
+ { 0x50, 0x26 }, { 0x51, 0x00 },
+ { 0x50, 0x27 }, { 0x51, 0xc3 },
+ { 0x50, 0x39 }, { 0x51, 0x02 },
+- { 0xec, 0x0f },
+- { 0xeb, 0x1f },
+- { 0x28, 0x6a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x00 },
++ { 0x50, 0xd5 }, { 0x51, 0x01 },
+ { 0xd0, 0x00 },
+ };
+
+@@ -317,7 +309,11 @@ static int mb86a20s_read_status(struct dvb_frontend *fe, enum fe_status *status)
+ if (val >= 7)
+ *status |= FE_HAS_SYNC;
+
+- if (val >= 8) /* Maybe 9? */
++ /*
++ * Actually, on state S8, it starts receiving TS, but the TS
++ * output is only on normal state after the transition to S9.
++ */
++ if (val >= 9)
+ *status |= FE_HAS_LOCK;
+
+ dev_dbg(&state->i2c->dev, "%s: Status = 0x%02x (state = %d)\n",
+@@ -2067,6 +2063,11 @@ static void mb86a20s_release(struct dvb_frontend *fe)
+ kfree(state);
+ }
+
++static int mb86a20s_get_frontend_algo(struct dvb_frontend *fe)
++{
++ return DVBFE_ALGO_HW;
++}
++
+ static struct dvb_frontend_ops mb86a20s_ops;
+
+ struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
+@@ -2140,6 +2141,7 @@ static struct dvb_frontend_ops mb86a20s_ops = {
+ .read_status = mb86a20s_read_status_and_stats,
+ .read_signal_strength = mb86a20s_read_signal_strength_from_cache,
+ .tune = mb86a20s_tune,
++ .get_frontend_algo = mb86a20s_get_frontend_algo,
+ };
+
+ MODULE_DESCRIPTION("DVB Frontend module for Fujitsu mb86A20s hardware");
+diff --git a/drivers/media/usb/cx231xx/cx231xx-avcore.c b/drivers/media/usb/cx231xx/cx231xx-avcore.c
+index 491913778bcc..2f52d66b4dae 100644
+--- a/drivers/media/usb/cx231xx/cx231xx-avcore.c
++++ b/drivers/media/usb/cx231xx/cx231xx-avcore.c
+@@ -1264,7 +1264,10 @@ int cx231xx_set_agc_analog_digital_mux_select(struct cx231xx *dev,
+ dev->board.agc_analog_digital_select_gpio,
+ analog_or_digital);
+
+- return status;
++ if (status < 0)
++ return status;
++
++ return 0;
+ }
+
+ int cx231xx_enable_i2c_port_3(struct cx231xx *dev, bool is_port_3)
+diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
+index 4a117a58c39a..8389c162bc89 100644
+--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
++++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
+@@ -486,7 +486,7 @@ struct cx231xx_board cx231xx_boards[] = {
+ .output_mode = OUT_MODE_VIP11,
+ .demod_xfer_mode = 0,
+ .ctl_pin_status_mask = 0xFFFFFFC4,
+- .agc_analog_digital_select_gpio = 0x00, /* According with PV cxPolaris.inf file */
++ .agc_analog_digital_select_gpio = 0x1c,
+ .tuner_sif_gpio = -1,
+ .tuner_scl_gpio = -1,
+ .tuner_sda_gpio = -1,
+diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
+index a2fd49b6be83..19b0293312a0 100644
+--- a/drivers/media/usb/cx231xx/cx231xx-core.c
++++ b/drivers/media/usb/cx231xx/cx231xx-core.c
+@@ -712,6 +712,7 @@ int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode)
+ break;
+ case CX231XX_BOARD_CNXT_RDE_253S:
+ case CX231XX_BOARD_CNXT_RDU_253S:
++ case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
+ errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 1);
+ break;
+ case CX231XX_BOARD_HAUPPAUGE_EXETER:
+@@ -738,7 +739,7 @@ int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode)
+ case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
+ case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL:
+ case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC:
+- errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
++ errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
+ break;
+ default:
+ break;
+diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c
+index 1105db2355d2..83bfb1659abe 100644
+--- a/drivers/memstick/host/rtsx_usb_ms.c
++++ b/drivers/memstick/host/rtsx_usb_ms.c
+@@ -524,6 +524,7 @@ static void rtsx_usb_ms_handle_req(struct work_struct *work)
+ int rc;
+
+ if (!host->req) {
++ pm_runtime_get_sync(ms_dev(host));
+ do {
+ rc = memstick_next_req(msh, &host->req);
+ dev_dbg(ms_dev(host), "next req %d\n", rc);
+@@ -544,6 +545,7 @@ static void rtsx_usb_ms_handle_req(struct work_struct *work)
+ host->req->error);
+ }
+ } while (!rc);
++ pm_runtime_put(ms_dev(host));
+ }
+
+ }
+@@ -570,6 +572,7 @@ static int rtsx_usb_ms_set_param(struct memstick_host *msh,
+ dev_dbg(ms_dev(host), "%s: param = %d, value = %d\n",
+ __func__, param, value);
+
++ pm_runtime_get_sync(ms_dev(host));
+ mutex_lock(&ucr->dev_mutex);
+
+ err = rtsx_usb_card_exclusive_check(ucr, RTSX_USB_MS_CARD);
+@@ -635,6 +638,7 @@ static int rtsx_usb_ms_set_param(struct memstick_host *msh,
+ }
+ out:
+ mutex_unlock(&ucr->dev_mutex);
++ pm_runtime_put(ms_dev(host));
+
+ /* power-on delay */
+ if (param == MEMSTICK_POWER && value == MEMSTICK_POWER_ON)
+@@ -681,6 +685,7 @@ static int rtsx_usb_detect_ms_card(void *__host)
+ int err;
+
+ for (;;) {
++ pm_runtime_get_sync(ms_dev(host));
+ mutex_lock(&ucr->dev_mutex);
+
+ /* Check pending MS card changes */
+@@ -703,6 +708,7 @@ static int rtsx_usb_detect_ms_card(void *__host)
+ }
+
+ poll_again:
++ pm_runtime_put(ms_dev(host));
+ if (host->eject)
+ break;
+
+diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
+index a8a68acd3267..4e8069866c85 100644
+--- a/drivers/misc/mei/hw-me-regs.h
++++ b/drivers/misc/mei/hw-me-regs.h
+@@ -66,6 +66,9 @@
+ #ifndef _MEI_HW_MEI_REGS_H_
+ #define _MEI_HW_MEI_REGS_H_
+
++#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
++#define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */
++
+ /*
+ * MEI device IDs
+ */
+diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
+index 27678d8154e0..0af3d7d30419 100644
+--- a/drivers/misc/mei/pci-me.c
++++ b/drivers/misc/mei/pci-me.c
+@@ -87,6 +87,9 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
+ {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_cfg)},
+
++ {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, mei_me_pch8_cfg)},
++ {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, mei_me_pch8_cfg)},
++
+ /* required last entry */
+ {0, }
+ };
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index 64950035613b..f2b733275a0a 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -1755,7 +1755,7 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_packed *packed = mqrq->packed;
+ bool do_rel_wr, do_data_tag;
+- u32 *packed_cmd_hdr;
++ __le32 *packed_cmd_hdr;
+ u8 hdr_blocks;
+ u8 i = 1;
+
+@@ -2279,7 +2279,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
+ set_capacity(md->disk, size);
+
+ if (mmc_host_cmd23(card->host)) {
+- if (mmc_card_mmc(card) ||
++ if ((mmc_card_mmc(card) &&
++ card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
+ (mmc_card_sd(card) &&
+ card->scr.cmds & SD_SCR_CMD23_SUPPORT))
+ md->flags |= MMC_BLK_CMD23;
+diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
+index 36cddab57d77..cf30b3712cb2 100644
+--- a/drivers/mmc/card/queue.h
++++ b/drivers/mmc/card/queue.h
+@@ -25,7 +25,7 @@ enum mmc_packed_type {
+
+ struct mmc_packed {
+ struct list_head list;
+- u32 cmd_hdr[1024];
++ __le32 cmd_hdr[1024];
+ unsigned int blocks;
+ u8 nr_entries;
+ u8 retries;
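The u32 to __le32 change in the two mmc hunks above documents that the packed command header is little-endian on the wire, which lets sparse flag any assignment that skips cpu_to_le32(). A userspace equivalent of the conversion, using htole32() from glibc's <endian.h> (the header word values are made up for illustration, not the eMMC packed-header layout):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cmd_hdr[2];

	/* each header word passes through an explicit conversion, so
	 * the byte order is the same on big- and little-endian hosts */
	cmd_hdr[0] = htole32(0x00000001u);
	cmd_hdr[1] = htole32(0x00000008u);

	printf("first word on the wire: %02x %02x %02x %02x\n",
	       ((uint8_t *)cmd_hdr)[0], ((uint8_t *)cmd_hdr)[1],
	       ((uint8_t *)cmd_hdr)[2], ((uint8_t *)cmd_hdr)[3]);
	return 0;
}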
+diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
+index 6c71fc9f76c7..da9f71b8deb0 100644
+--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
++++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
+@@ -1138,11 +1138,6 @@ static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+ dev_dbg(sdmmc_dev(host), "%s\n", __func__);
+ mutex_lock(&ucr->dev_mutex);
+
+- if (rtsx_usb_card_exclusive_check(ucr, RTSX_USB_SD_CARD)) {
+- mutex_unlock(&ucr->dev_mutex);
+- return;
+- }
+-
+ sd_set_power_mode(host, ios->power_mode);
+ sd_set_bus_width(host, ios->bus_width);
+ sd_set_timing(host, ios->timing, &host->ddr_mode);
+@@ -1314,6 +1309,7 @@ static void rtsx_usb_update_led(struct work_struct *work)
+ container_of(work, struct rtsx_usb_sdmmc, led_work);
+ struct rtsx_ucr *ucr = host->ucr;
+
++ pm_runtime_get_sync(sdmmc_dev(host));
+ mutex_lock(&ucr->dev_mutex);
+
+ if (host->led.brightness == LED_OFF)
+@@ -1322,6 +1318,7 @@ static void rtsx_usb_update_led(struct work_struct *work)
+ rtsx_usb_turn_on_led(ucr);
+
+ mutex_unlock(&ucr->dev_mutex);
++ pm_runtime_put(sdmmc_dev(host));
+ }
+ #endif
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 552a34dc4f82..64a428984afe 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -675,7 +675,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+ * host->clock is in Hz. target_timeout is in us.
+ * Hence, us = 1000000 * cycles / Hz. Round up.
+ */
+- val = 1000000 * data->timeout_clks;
++ val = 1000000ULL * data->timeout_clks;
+ if (do_div(val, host->clock))
+ target_timeout++;
+ target_timeout += val;
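The sdhci fix above is a one-suffix integer-promotion repair: 1000000 * data->timeout_clks is evaluated in 32-bit arithmetic and silently wraps for timeout_clks above roughly 4294, before do_div() ever sees the value; the ULL suffix forces the multiply into 64 bits. A runnable demonstration (the timeout_clks value is illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int timeout_clks = 50000;

	/* 32-bit arithmetic: the product wraps modulo 2^32 and only
	 * then widens to 64 bits */
	unsigned long long wrong = 1000000 * timeout_clks;

	/* the one-character fix: the ULL constant widens the whole
	 * multiplication to 64 bits first */
	unsigned long long right = 1000000ULL * timeout_clks;

	printf("32-bit product: %llu\n64-bit product: %llu\n", wrong, right);
	return 0;
}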
+diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
+index 56065632a5b8..75286588b823 100644
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -643,7 +643,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
+ int shutdown)
+ {
+ int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
+- int vol_id = -1, lnum = -1;
++ int erase = 0, keep = 0, vol_id = -1, lnum = -1;
+ #ifdef CONFIG_MTD_UBI_FASTMAP
+ int anchor = wrk->anchor;
+ #endif
+@@ -777,6 +777,16 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
+ e1->pnum);
+ scrubbing = 1;
+ goto out_not_moved;
++ } else if (ubi->fast_attach && err == UBI_IO_BAD_HDR_EBADMSG) {
++ /*
++ * While a full scan would detect interrupted erasures
++ * at attach time we can face them here when attached from
++ * Fastmap.
++ */
++ dbg_wl("PEB %d has ECC errors, maybe from an interrupted erasure",
++ e1->pnum);
++ erase = 1;
++ goto out_not_moved;
+ }
+
+ ubi_err(ubi, "error %d while reading VID header from PEB %d",
+@@ -810,6 +820,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
+ * Target PEB had bit-flips or write error - torture it.
+ */
+ torture = 1;
++ keep = 1;
+ goto out_not_moved;
+ }
+
+@@ -895,7 +906,7 @@ out_not_moved:
+ ubi->erroneous_peb_count += 1;
+ } else if (scrubbing)
+ wl_tree_add(e1, &ubi->scrub);
+- else
++ else if (keep)
+ wl_tree_add(e1, &ubi->used);
+ ubi_assert(!ubi->move_to_put);
+ ubi->move_from = ubi->move_to = NULL;
+@@ -907,6 +918,12 @@ out_not_moved:
+ if (err)
+ goto out_ro;
+
++ if (erase) {
++ err = do_sync_erase(ubi, e1, vol_id, lnum, 1);
++ if (err)
++ goto out_ro;
++ }
++
+ mutex_unlock(&ubi->move_mutex);
+ return 0;
+
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+index 2e611dc5f162..1c8123816745 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+@@ -14819,6 +14819,10 @@ static int bnx2x_get_fc_npiv(struct net_device *dev,
+ }
+
+ offset = SHMEM2_RD(bp, fc_npiv_nvram_tbl_addr[BP_PORT(bp)]);
++ if (!offset) {
++ DP(BNX2X_MSG_MCP, "No FC-NPIV in NVRAM\n");
++ goto out;
++ }
+ DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset);
+
+ /* Read the table contents from nvram */
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+index 67e9633ea9c7..232191417b93 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -2282,7 +2282,7 @@ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
+ struct mlx4_en_dev *mdev = en_priv->mdev;
+ u64 mac_u64 = mlx4_mac_to_u64(mac);
+
+- if (!is_valid_ether_addr(mac))
++ if (is_multicast_ether_addr(mac))
+ return -EINVAL;
+
+ return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
+diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
+index 5be34118e0af..f67e7e5b13e1 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/regd.c
++++ b/drivers/net/wireless/realtek/rtlwifi/regd.c
+@@ -345,9 +345,9 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select(
+ return &rtl_regdom_no_midband;
+ case COUNTRY_CODE_IC:
+ return &rtl_regdom_11;
+- case COUNTRY_CODE_ETSI:
+ case COUNTRY_CODE_TELEC_NETGEAR:
+ return &rtl_regdom_60_64;
++ case COUNTRY_CODE_ETSI:
+ case COUNTRY_CODE_SPAIN:
+ case COUNTRY_CODE_FRANCE:
+ case COUNTRY_CODE_ISRAEL:
+@@ -406,6 +406,8 @@ static u8 channel_plan_to_country_code(u8 channelplan)
+ return COUNTRY_CODE_WORLD_WIDE_13;
+ case 0x22:
+ return COUNTRY_CODE_IC;
++ case 0x25:
++ return COUNTRY_CODE_ETSI;
+ case 0x32:
+ return COUNTRY_CODE_TELEC_NETGEAR;
+ case 0x41:
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 42774bc39786..254192b5dad1 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -3136,6 +3136,7 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
+
+ static void quirk_no_pm_reset(struct pci_dev *dev)
+ {
+diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
+index fb991ec76423..696116ebdf50 100644
+--- a/drivers/regulator/tps65910-regulator.c
++++ b/drivers/regulator/tps65910-regulator.c
+@@ -1111,6 +1111,12 @@ static int tps65910_probe(struct platform_device *pdev)
+ pmic->num_regulators = ARRAY_SIZE(tps65910_regs);
+ pmic->ext_sleep_control = tps65910_ext_sleep_control;
+ info = tps65910_regs;
++		/* Work around silicon erratum SWCZ010: the programmed output
++		 * voltage can rise above the expected level, or the device can
++		 * crash. Workaround: do not synchronize the DCDC clocks.
++		 */
++ tps65910_reg_clear_bits(pmic->mfd, TPS65910_DCDCCTRL,
++ DCDCCTRL_DCDCCKSYNC_MASK);
+ break;
+ case TPS65911:
+ pmic->get_ctrl_reg = &tps65911_get_ctrl_register;
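
tps65910_reg_clear_bits() is a read-modify-write over I2C, so the workaround amounts to clearing one clock-sync bit once at probe time. A self-contained sketch of such a clear-bits helper over a fake register file; the register number and mask value are illustrative, not the real TPS65910 map:

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t regs[256];   /* fake register file standing in for I2C */

    static int reg_read(uint8_t reg, uint8_t *val) { *val = regs[reg]; return 0; }
    static int reg_write(uint8_t reg, uint8_t val) { regs[reg] = val; return 0; }

    /* Read-modify-write: clear 'mask' bits, leave the rest untouched. */
    static int reg_clear_bits(uint8_t reg, uint8_t mask)
    {
        uint8_t val;
        int ret = reg_read(reg, &val);

        if (ret)
            return ret;
        return reg_write(reg, val & ~mask);
    }

    #define DCDCCTRL        0x3e    /* illustrative register number */
    #define DCDCCKSYNC_MASK 0x02    /* illustrative bit position */

    int main(void)
    {
        regs[DCDCCTRL] = 0xff;
        reg_clear_bits(DCDCCTRL, DCDCCKSYNC_MASK);     /* erratum workaround */
        printf("DCDCCTRL = 0x%02x\n", regs[DCDCCTRL]); /* 0xfd */
        return 0;
    }
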
+diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
+index 5d7fbe4e907e..581001989937 100644
+--- a/drivers/s390/scsi/zfcp_dbf.c
++++ b/drivers/s390/scsi/zfcp_dbf.c
+@@ -3,7 +3,7 @@
+ *
+ * Debug traces for zfcp.
+ *
+- * Copyright IBM Corp. 2002, 2013
++ * Copyright IBM Corp. 2002, 2016
+ */
+
+ #define KMSG_COMPONENT "zfcp"
+@@ -65,7 +65,7 @@ void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
+ * @tag: tag indicating which kind of unsolicited status has been received
+ * @req: request for which a response was received
+ */
+-void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
++void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
+ {
+ struct zfcp_dbf *dbf = req->adapter->dbf;
+ struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
+@@ -85,6 +85,8 @@ void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
+ rec->u.res.req_issued = req->issued;
+ rec->u.res.prot_status = q_pref->prot_status;
+ rec->u.res.fsf_status = q_head->fsf_status;
++ rec->u.res.port_handle = q_head->port_handle;
++ rec->u.res.lun_handle = q_head->lun_handle;
+
+ memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
+ FSF_PROT_STATUS_QUAL_SIZE);
+@@ -97,7 +99,7 @@ void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
+ rec->pl_len, "fsf_res", req->req_id);
+ }
+
+- debug_event(dbf->hba, 1, rec, sizeof(*rec));
++ debug_event(dbf->hba, level, rec, sizeof(*rec));
+ spin_unlock_irqrestore(&dbf->hba_lock, flags);
+ }
+
+@@ -241,7 +243,8 @@ static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
+ if (sdev) {
+ rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
+ rec->lun = zfcp_scsi_dev_lun(sdev);
+- }
++ } else
++ rec->lun = ZFCP_DBF_INVALID_LUN;
+ }
+
+ /**
+@@ -320,13 +323,48 @@ void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
+ spin_unlock_irqrestore(&dbf->rec_lock, flags);
+ }
+
++/**
++ * zfcp_dbf_rec_run_wka - trace a WKA port event using the running-recovery record layout
++ * @tag: identifier for event
++ * @wka_port: well known address port
++ * @req_id: request ID to correlate with potential HBA trace record
++ */
++void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
++ u64 req_id)
++{
++ struct zfcp_dbf *dbf = wka_port->adapter->dbf;
++ struct zfcp_dbf_rec *rec = &dbf->rec_buf;
++ unsigned long flags;
++
++ spin_lock_irqsave(&dbf->rec_lock, flags);
++ memset(rec, 0, sizeof(*rec));
++
++ rec->id = ZFCP_DBF_REC_RUN;
++ memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
++ rec->port_status = wka_port->status;
++ rec->d_id = wka_port->d_id;
++ rec->lun = ZFCP_DBF_INVALID_LUN;
++
++ rec->u.run.fsf_req_id = req_id;
++ rec->u.run.rec_status = ~0;
++ rec->u.run.rec_step = ~0;
++ rec->u.run.rec_action = ~0;
++ rec->u.run.rec_count = ~0;
++
++ debug_event(dbf->rec, 1, rec, sizeof(*rec));
++ spin_unlock_irqrestore(&dbf->rec_lock, flags);
++}
++
+ static inline
+-void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len,
+- u64 req_id, u32 d_id)
++void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
++ char *paytag, struct scatterlist *sg, u8 id, u16 len,
++ u64 req_id, u32 d_id, u16 cap_len)
+ {
+ struct zfcp_dbf_san *rec = &dbf->san_buf;
+ u16 rec_len;
+ unsigned long flags;
++ struct zfcp_dbf_pay *payload = &dbf->pay_buf;
++ u16 pay_sum = 0;
+
+ spin_lock_irqsave(&dbf->san_lock, flags);
+ memset(rec, 0, sizeof(*rec));
+@@ -334,10 +372,41 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len,
+ rec->id = id;
+ rec->fsf_req_id = req_id;
+ rec->d_id = d_id;
+- rec_len = min(len, (u16)ZFCP_DBF_SAN_MAX_PAYLOAD);
+- memcpy(rec->payload, data, rec_len);
+ memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
++ rec->pl_len = len; /* full length even if we cap pay below */
++ if (!sg)
++ goto out;
++ rec_len = min_t(unsigned int, sg->length, ZFCP_DBF_SAN_MAX_PAYLOAD);
++ memcpy(rec->payload, sg_virt(sg), rec_len); /* part of 1st sg entry */
++ if (len <= rec_len)
++ goto out; /* skip pay record if full content in rec->payload */
++
++ /* if (len > rec_len):
++ * dump data up to cap_len ignoring small duplicate in rec->payload
++ */
++ spin_lock(&dbf->pay_lock);
++ memset(payload, 0, sizeof(*payload));
++ memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
++ payload->fsf_req_id = req_id;
++ payload->counter = 0;
++ for (; sg && pay_sum < cap_len; sg = sg_next(sg)) {
++ u16 pay_len, offset = 0;
++
++ while (offset < sg->length && pay_sum < cap_len) {
++ pay_len = min((u16)ZFCP_DBF_PAY_MAX_REC,
++ (u16)(sg->length - offset));
++ /* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */
++ memcpy(payload->data, sg_virt(sg) + offset, pay_len);
++ debug_event(dbf->pay, 1, payload,
++ zfcp_dbf_plen(pay_len));
++ payload->counter++;
++ offset += pay_len;
++ pay_sum += pay_len;
++ }
++ }
++ spin_unlock(&dbf->pay_lock);
+
++out:
+ debug_event(dbf->san, 1, rec, sizeof(*rec));
+ spin_unlock_irqrestore(&dbf->san_lock, flags);
+ }
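
The new payload path walks the response in ZFCP_DBF_PAY_MAX_REC-sized chunks and stops once cap_len bytes have been logged, so oversized SAN responses cannot flood the trace area; as the in-code comment notes, the total may overshoot the cap by at most one chunk. The same chunked, capped copy over a flat buffer instead of a scatterlist (names are illustrative):

    #include <stdio.h>
    #include <string.h>

    #define PAY_MAX_REC 8   /* chunk size per trace record (illustrative) */

    static void dump_capped(const char *buf, size_t len, size_t cap_len)
    {
        size_t offset = 0, pay_sum = 0, counter = 0;
        char chunk[PAY_MAX_REC];

        while (offset < len && pay_sum < cap_len) {
            size_t pay_len = len - offset;

            if (pay_len > PAY_MAX_REC)
                pay_len = PAY_MAX_REC;
            /* pay_sum may overshoot cap_len by at most one chunk,
             * exactly as the zfcp loop's comment says. */
            memcpy(chunk, buf + offset, pay_len);
            printf("record %zu: %zu bytes\n", counter++, pay_len);
            offset += pay_len;
            pay_sum += pay_len;
        }
    }

    int main(void)
    {
        char payload[64] = { 0 };

        dump_capped(payload, sizeof(payload), 20); /* caps after 3 records */
        return 0;
    }
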
+@@ -354,9 +423,62 @@ void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
+ struct zfcp_fsf_ct_els *ct_els = fsf->data;
+ u16 length;
+
+- length = (u16)(ct_els->req->length + FC_CT_HDR_LEN);
+- zfcp_dbf_san(tag, dbf, sg_virt(ct_els->req), ZFCP_DBF_SAN_REQ, length,
+- fsf->req_id, d_id);
++ length = (u16)zfcp_qdio_real_bytes(ct_els->req);
++ zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ,
++ length, fsf->req_id, d_id, length);
++}
++
++static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
++ struct zfcp_fsf_req *fsf,
++ u16 len)
++{
++ struct zfcp_fsf_ct_els *ct_els = fsf->data;
++ struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
++ struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
++ struct scatterlist *resp_entry = ct_els->resp;
++ struct fc_gpn_ft_resp *acc;
++ int max_entries, x, last = 0;
++
++ if (!(memcmp(tag, "fsscth2", 7) == 0
++ && ct_els->d_id == FC_FID_DIR_SERV
++ && reqh->ct_rev == FC_CT_REV
++ && reqh->ct_in_id[0] == 0
++ && reqh->ct_in_id[1] == 0
++ && reqh->ct_in_id[2] == 0
++ && reqh->ct_fs_type == FC_FST_DIR
++ && reqh->ct_fs_subtype == FC_NS_SUBTYPE
++ && reqh->ct_options == 0
++ && reqh->_ct_resvd1 == 0
++ && reqh->ct_cmd == FC_NS_GPN_FT
++ /* reqh->ct_mr_size can vary so do not match but read below */
++ && reqh->_ct_resvd2 == 0
++ && reqh->ct_reason == 0
++ && reqh->ct_explan == 0
++ && reqh->ct_vendor == 0
++ && reqn->fn_resvd == 0
++ && reqn->fn_domain_id_scope == 0
++ && reqn->fn_area_id_scope == 0
++ && reqn->fn_fc4_type == FC_TYPE_FCP))
++ return len; /* not GPN_FT response so do not cap */
++
++ acc = sg_virt(resp_entry);
++ max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp))
++ + 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
++ * to account for header as 1st pseudo "entry" */;
++
++ /* the basic CT_IU preamble is the same size as one entry in the GPN_FT
++ * response, allowing us to skip special handling for it - just skip it
++ */
++ for (x = 1; x < max_entries && !last; x++) {
++ if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
++ acc++;
++ else
++ acc = sg_virt(++resp_entry);
++
++ last = acc->fp_flags & FC_NS_FID_LAST;
++ }
++ len = min(len, (u16)(x * sizeof(struct fc_gpn_ft_resp)));
++ return len; /* cap after last entry */
+ }
+
+ /**
+@@ -370,9 +492,10 @@ void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
+ struct zfcp_fsf_ct_els *ct_els = fsf->data;
+ u16 length;
+
+- length = (u16)(ct_els->resp->length + FC_CT_HDR_LEN);
+- zfcp_dbf_san(tag, dbf, sg_virt(ct_els->resp), ZFCP_DBF_SAN_RES, length,
+- fsf->req_id, 0);
++ length = (u16)zfcp_qdio_real_bytes(ct_els->resp);
++ zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES,
++ length, fsf->req_id, ct_els->d_id,
++ zfcp_dbf_san_res_cap_len_if_gpn_ft(tag, fsf, length));
+ }
+
+ /**
+@@ -386,11 +509,13 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
+ struct fsf_status_read_buffer *srb =
+ (struct fsf_status_read_buffer *) fsf->data;
+ u16 length;
++ struct scatterlist sg;
+
+ length = (u16)(srb->length -
+ offsetof(struct fsf_status_read_buffer, payload));
+- zfcp_dbf_san(tag, dbf, srb->payload.data, ZFCP_DBF_SAN_ELS, length,
+- fsf->req_id, ntoh24(srb->d_id));
++ sg_init_one(&sg, srb->payload.data, length);
++ zfcp_dbf_san(tag, dbf, "san_els", &sg, ZFCP_DBF_SAN_ELS, length,
++ fsf->req_id, ntoh24(srb->d_id), length);
+ }
+
+ /**
+@@ -399,7 +524,8 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
+ * @sc: pointer to struct scsi_cmnd
+ * @fsf: pointer to struct zfcp_fsf_req
+ */
+-void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
++void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
++ struct zfcp_fsf_req *fsf)
+ {
+ struct zfcp_adapter *adapter =
+ (struct zfcp_adapter *) sc->device->host->hostdata[0];
+@@ -442,7 +568,7 @@ void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
+ }
+ }
+
+- debug_event(dbf->scsi, 1, rec, sizeof(*rec));
++ debug_event(dbf->scsi, level, rec, sizeof(*rec));
+ spin_unlock_irqrestore(&dbf->scsi_lock, flags);
+ }
+
+diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
+index 0be3d48681ae..36d07584271d 100644
+--- a/drivers/s390/scsi/zfcp_dbf.h
++++ b/drivers/s390/scsi/zfcp_dbf.h
+@@ -2,7 +2,7 @@
+ * zfcp device driver
+ * debug feature declarations
+ *
+- * Copyright IBM Corp. 2008, 2010
++ * Copyright IBM Corp. 2008, 2015
+ */
+
+ #ifndef ZFCP_DBF_H
+@@ -17,6 +17,11 @@
+
+ #define ZFCP_DBF_INVALID_LUN 0xFFFFFFFFFFFFFFFFull
+
++enum zfcp_dbf_pseudo_erp_act_type {
++ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD = 0xff,
++ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL = 0xfe,
++};
++
+ /**
+ * struct zfcp_dbf_rec_trigger - trace record for triggered recovery action
+ * @ready: number of ready recovery actions
+@@ -110,6 +115,7 @@ struct zfcp_dbf_san {
+ u32 d_id;
+ #define ZFCP_DBF_SAN_MAX_PAYLOAD (FC_CT_HDR_LEN + 32)
+ char payload[ZFCP_DBF_SAN_MAX_PAYLOAD];
++ u16 pl_len;
+ } __packed;
+
+ /**
+@@ -126,6 +132,8 @@ struct zfcp_dbf_hba_res {
+ u8 prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE];
+ u32 fsf_status;
+ u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
++ u32 port_handle;
++ u32 lun_handle;
+ } __packed;
+
+ /**
+@@ -279,7 +287,7 @@ static inline
+ void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
+ {
+ if (debug_level_enabled(req->adapter->dbf->hba, level))
+- zfcp_dbf_hba_fsf_res(tag, req);
++ zfcp_dbf_hba_fsf_res(tag, level, req);
+ }
+
+ /**
+@@ -318,7 +326,7 @@ void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd,
+ scmd->device->host->hostdata[0];
+
+ if (debug_level_enabled(adapter->dbf->scsi, level))
+- zfcp_dbf_scsi(tag, scmd, req);
++ zfcp_dbf_scsi(tag, level, scmd, req);
+ }
+
+ /**
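
The inline wrappers above gate on debug_level_enabled() before calling into the record writers, so record assembly (locking, memset, payload copies) is skipped entirely when the trace area's level is too low, and the writer now stores the record at the caller's level rather than a hard-coded 1. A sketch of that gate-then-record shape; debug_area and friends are stand-ins for the s390 debug API:

    #include <stdbool.h>
    #include <stdio.h>

    struct debug_area { int level; };

    static bool debug_level_enabled(const struct debug_area *a, int level)
    {
        return level <= a->level;
    }

    static void write_record(struct debug_area *a, int level, const char *tag)
    {
        /* ...the expensive part: lock, memset, copy payload... */
        (void)a;
        printf("traced '%s' at level %d\n", tag, level);
    }

    static void trace(struct debug_area *a, int level, const char *tag)
    {
        if (debug_level_enabled(a, level))  /* cheap gate first */
            write_record(a, level, tag);
    }

    int main(void)
    {
        struct debug_area hba = { .level = 1 };

        trace(&hba, 1, "fs_norm");    /* recorded */
        trace(&hba, 3, "fs_verbose"); /* skipped: 3 > 1 */
        return 0;
    }
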
+diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
+index 3fb410977014..a59d678125bd 100644
+--- a/drivers/s390/scsi/zfcp_erp.c
++++ b/drivers/s390/scsi/zfcp_erp.c
+@@ -3,7 +3,7 @@
+ *
+ * Error Recovery Procedures (ERP).
+ *
+- * Copyright IBM Corp. 2002, 2010
++ * Copyright IBM Corp. 2002, 2015
+ */
+
+ #define KMSG_COMPONENT "zfcp"
+@@ -1217,8 +1217,14 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
+ break;
+
+ case ZFCP_ERP_ACTION_REOPEN_PORT:
+- if (result == ZFCP_ERP_SUCCEEDED)
+- zfcp_scsi_schedule_rport_register(port);
++ /* This switch case might also happen after a forced reopen
++ * was successfully done and thus overwritten with a new
++ * non-forced reopen at `ersfs_2'. In this case, we must not
++ * do the clean-up of the non-forced version.
++ */
++ if (act->step != ZFCP_ERP_STEP_UNINITIALIZED)
++ if (result == ZFCP_ERP_SUCCEEDED)
++ zfcp_scsi_schedule_rport_register(port);
+ /* fall through */
+ case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+ put_device(&port->dev);
+diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
+index 5b500652572b..c8fed9fa1cca 100644
+--- a/drivers/s390/scsi/zfcp_ext.h
++++ b/drivers/s390/scsi/zfcp_ext.h
+@@ -3,7 +3,7 @@
+ *
+ * External function declarations.
+ *
+- * Copyright IBM Corp. 2002, 2010
++ * Copyright IBM Corp. 2002, 2015
+ */
+
+ #ifndef ZFCP_EXT_H
+@@ -35,8 +35,9 @@ extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
+ extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
+ struct zfcp_port *, struct scsi_device *, u8, u8);
+ extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
++extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64);
+ extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
+-extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
++extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *);
+ extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
+ extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
+ extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
+@@ -44,7 +45,8 @@ extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
+ extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
+ extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
+ extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
+-extern void zfcp_dbf_scsi(char *, struct scsi_cmnd *, struct zfcp_fsf_req *);
++extern void zfcp_dbf_scsi(char *, int, struct scsi_cmnd *,
++ struct zfcp_fsf_req *);
+
+ /* zfcp_erp.c */
+ extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
+diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
+index 522a633c866a..75f820ca17b7 100644
+--- a/drivers/s390/scsi/zfcp_fsf.c
++++ b/drivers/s390/scsi/zfcp_fsf.c
+@@ -3,7 +3,7 @@
+ *
+ * Implementation of FSF commands.
+ *
+- * Copyright IBM Corp. 2002, 2013
++ * Copyright IBM Corp. 2002, 2015
+ */
+
+ #define KMSG_COMPONENT "zfcp"
+@@ -508,7 +508,10 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
+ fc_host_port_type(shost) = FC_PORTTYPE_PTP;
+ break;
+ case FSF_TOPO_FABRIC:
+- fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
++ if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
++ fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
++ else
++ fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+ break;
+ case FSF_TOPO_AL:
+ fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
+@@ -613,7 +616,6 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
+
+ if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
+ fc_host_permanent_port_name(shost) = bottom->wwpn;
+- fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+ } else
+ fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
+ fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
+@@ -982,8 +984,12 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
+ if (zfcp_adapter_multi_buffer_active(adapter)) {
+ if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
+ return -EIO;
++ qtcb->bottom.support.req_buf_length =
++ zfcp_qdio_real_bytes(sg_req);
+ if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
+ return -EIO;
++ qtcb->bottom.support.resp_buf_length =
++ zfcp_qdio_real_bytes(sg_resp);
+
+ zfcp_qdio_set_data_div(qdio, &req->qdio_req,
+ zfcp_qdio_sbale_count(sg_req));
+@@ -1073,6 +1079,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
+
+ req->handler = zfcp_fsf_send_ct_handler;
+ req->qtcb->header.port_handle = wka_port->handle;
++ ct->d_id = wka_port->d_id;
+ req->data = ct;
+
+ zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
+@@ -1169,6 +1176,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
+
+ hton24(req->qtcb->bottom.support.d_id, d_id);
+ req->handler = zfcp_fsf_send_els_handler;
++ els->d_id = d_id;
+ req->data = els;
+
+ zfcp_dbf_san_req("fssels1", req, d_id);
+@@ -1575,7 +1583,7 @@ out:
+ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
+ {
+ struct zfcp_qdio *qdio = wka_port->adapter->qdio;
+- struct zfcp_fsf_req *req;
++ struct zfcp_fsf_req *req = NULL;
+ int retval = -EIO;
+
+ spin_lock_irq(&qdio->req_q_lock);
+@@ -1604,6 +1612,8 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
+ zfcp_fsf_req_free(req);
+ out:
+ spin_unlock_irq(&qdio->req_q_lock);
++ if (req && !IS_ERR(req))
++ zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
+ return retval;
+ }
+
+@@ -1628,7 +1638,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
+ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
+ {
+ struct zfcp_qdio *qdio = wka_port->adapter->qdio;
+- struct zfcp_fsf_req *req;
++ struct zfcp_fsf_req *req = NULL;
+ int retval = -EIO;
+
+ spin_lock_irq(&qdio->req_q_lock);
+@@ -1657,6 +1667,8 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
+ zfcp_fsf_req_free(req);
+ out:
+ spin_unlock_irq(&qdio->req_q_lock);
++ if (req && !IS_ERR(req))
++ zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
+ return retval;
+ }
+
+diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
+index 57ae3ae1046d..be1c04b334c5 100644
+--- a/drivers/s390/scsi/zfcp_fsf.h
++++ b/drivers/s390/scsi/zfcp_fsf.h
+@@ -3,7 +3,7 @@
+ *
+ * Interface to the FSF support functions.
+ *
+- * Copyright IBM Corp. 2002, 2010
++ * Copyright IBM Corp. 2002, 2015
+ */
+
+ #ifndef FSF_H
+@@ -436,6 +436,7 @@ struct zfcp_blk_drv_data {
+ * @handler_data: data passed to handler function
+ * @port: Optional pointer to port for zfcp internal ELS (only test link ADISC)
+ * @status: used to pass error status to calling function
++ * @d_id: destination ID: the open WKA port for CT, or the target D_ID for ELS
+ */
+ struct zfcp_fsf_ct_els {
+ struct scatterlist *req;
+@@ -444,6 +445,7 @@ struct zfcp_fsf_ct_els {
+ void *handler_data;
+ struct zfcp_port *port;
+ int status;
++ u32 d_id;
+ };
+
+ #endif /* FSF_H */
+diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
+index b3c6ff49103b..9069f98a1817 100644
+--- a/drivers/s390/scsi/zfcp_scsi.c
++++ b/drivers/s390/scsi/zfcp_scsi.c
+@@ -3,7 +3,7 @@
+ *
+ * Interface to Linux SCSI midlayer.
+ *
+- * Copyright IBM Corp. 2002, 2013
++ * Copyright IBM Corp. 2002, 2015
+ */
+
+ #define KMSG_COMPONENT "zfcp"
+@@ -556,6 +556,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
+ ids.port_id = port->d_id;
+ ids.roles = FC_RPORT_ROLE_FCP_TARGET;
+
++ zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL,
++ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
++ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
+ rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
+ if (!rport) {
+ dev_err(&port->adapter->ccw_device->dev,
+@@ -577,6 +580,9 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
+ struct fc_rport *rport = port->rport;
+
+ if (rport) {
++ zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL,
++ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
++ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
+ fc_remote_port_delete(rport);
+ port->rport = NULL;
+ }
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index a3860367b568..e9ce74afd13f 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -3930,6 +3930,70 @@ static int hpsa_set_local_logical_count(struct ctlr_info *h,
+ return rc;
+ }
+
++static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes)
++{
++ struct bmic_identify_physical_device *id_phys;
++ bool is_spare = false;
++ int rc;
++
++ id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
++ if (!id_phys)
++ return false;
++
++ rc = hpsa_bmic_id_physical_device(h,
++ lunaddrbytes,
++ GET_BMIC_DRIVE_NUMBER(lunaddrbytes),
++ id_phys, sizeof(*id_phys));
++ if (rc == 0)
++ is_spare = (id_phys->more_flags >> 6) & 0x01;
++
++ kfree(id_phys);
++ return is_spare;
++}
++
++#define RPL_DEV_FLAG_NON_DISK 0x1
++#define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED 0x2
++#define RPL_DEV_FLAG_UNCONFIG_DISK 0x4
++
++#define BMIC_DEVICE_TYPE_ENCLOSURE 6
++
++static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes,
++ struct ext_report_lun_entry *rle)
++{
++ u8 device_flags;
++ u8 device_type;
++
++ if (!MASKED_DEVICE(lunaddrbytes))
++ return false;
++
++ device_flags = rle->device_flags;
++ device_type = rle->device_type;
++
++ if (device_flags & RPL_DEV_FLAG_NON_DISK) {
++ if (device_type == BMIC_DEVICE_TYPE_ENCLOSURE)
++ return false;
++ return true;
++ }
++
++ if (!(device_flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED))
++ return false;
++
++ if (device_flags & RPL_DEV_FLAG_UNCONFIG_DISK)
++ return false;
++
++ /*
++ * Spares may be spun down. We do not want to send an
++ * Inquiry to a RAID set spare drive, as that would spin
++ * it up; that is a performance hit, because I/O to the
++ * RAID device stops while the spin-up occurs, which can
++ * take over 50 seconds.
++ */
++ if (hpsa_is_disk_spare(h, lunaddrbytes))
++ return true;
++
++ return false;
++}
+
+ static void hpsa_update_scsi_devices(struct ctlr_info *h)
+ {
+@@ -4023,6 +4087,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
+ u8 *lunaddrbytes, is_OBDR = 0;
+ int rc = 0;
+ int phys_dev_index = i - (raid_ctlr_position == 0);
++ bool skip_device = false;
+
+ physical_device = i < nphysicals + (raid_ctlr_position == 0);
+
+@@ -4030,10 +4095,15 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
+ lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
+ i, nphysicals, nlogicals, physdev_list, logdev_list);
+
+- /* skip masked non-disk devices */
+- if (MASKED_DEVICE(lunaddrbytes) && physical_device &&
+- (physdev_list->LUN[phys_dev_index].device_flags & 0x01))
+- continue;
++ /*
++ * Skip over some devices such as a spare.
++ */
++ if (!tmpdevice->external && physical_device) {
++ skip_device = hpsa_skip_device(h, lunaddrbytes,
++ &physdev_list->LUN[phys_dev_index]);
++ if (skip_device)
++ continue;
++ }
+
+ /* Get device type, vendor, model, device id */
+ rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
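
hpsa_skip_device() above is a small decision tree over the extended report-LUNs flags: masked non-disks are skipped unless they are enclosures, and a masked disk is only subjected to the spin-up-inducing spare query when the controller supports unconfigured-disk reporting but did not flag the device. A condensed restatement with the flag values copied from the hunk; is_spare() stubs the BMIC query:

    #include <stdbool.h>
    #include <stdio.h>

    #define RPL_DEV_FLAG_NON_DISK                           0x1
    #define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED  0x2
    #define RPL_DEV_FLAG_UNCONFIG_DISK                      0x4
    #define BMIC_DEVICE_TYPE_ENCLOSURE                      6

    static bool is_spare(void) { return true; } /* stand-in for the BMIC query */

    static bool skip_device(bool masked, unsigned int flags, unsigned int type)
    {
        if (!masked)
            return false;
        if (flags & RPL_DEV_FLAG_NON_DISK)
            return type != BMIC_DEVICE_TYPE_ENCLOSURE;
        if (!(flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED))
            return false;
        if (flags & RPL_DEV_FLAG_UNCONFIG_DISK)
            return false;
        return is_spare();  /* avoid spinning up a sleeping spare */
    }

    int main(void)
    {
        printf("masked non-disk:  %d\n", skip_device(true, 0x1, 0)); /* 1 */
        printf("masked enclosure: %d\n", skip_device(true, 0x1, 6)); /* 0 */
        printf("reported spare:   %d\n", skip_device(true, 0x2, 0)); /* 1 */
        return 0;
    }
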
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index f0cfaacbfabd..692445bcca6f 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -1459,12 +1459,12 @@ retry:
+ out_err:
+ kfree(lun_data);
+ out:
+- scsi_device_put(sdev);
+ if (scsi_device_created(sdev))
+ /*
+ * the sdev we used didn't appear in the report luns scan
+ */
+ __scsi_remove_device(sdev);
++ scsi_device_put(sdev);
+ return ret;
+ }
+
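
The scsi_scan fix is purely an ordering change: scsi_device_put() may drop the last reference and free the sdev, so __scsi_remove_device(), which still dereferences it, must run first. The rule in miniature with a generic refcounted object (not the SCSI API):

    #include <stdio.h>
    #include <stdlib.h>

    struct obj { int refcount; };

    static struct obj *obj_get(struct obj *o) { o->refcount++; return o; }

    static void obj_put(struct obj *o)
    {
        if (--o->refcount == 0) {
            puts("freed");
            free(o);
        }
    }

    /* Needs a live object: must run before the final put. */
    static void obj_teardown(struct obj *o)
    {
        printf("teardown, ref=%d\n", o->refcount);
    }

    int main(void)
    {
        struct obj *o = calloc(1, sizeof(*o));

        obj_get(o);
        /* The wrong order would be obj_put(o); obj_teardown(o);
         * which is a use-after-free if ours was the last reference. */
        obj_teardown(o);
        obj_put(o);
        return 0;
    }
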
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 0d7c6e86f149..6ee50742f6a5 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -2879,10 +2879,10 @@ static int sd_revalidate_disk(struct gendisk *disk)
+ if (sdkp->opt_xfer_blocks &&
+ sdkp->opt_xfer_blocks <= dev_max &&
+ sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
+- sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE)
+- rw_max = q->limits.io_opt =
+- sdkp->opt_xfer_blocks * sdp->sector_size;
+- else
++ logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_CACHE_SIZE) {
++ q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
++ rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
++ } else
+ rw_max = BLK_DEF_MAX_SECTORS;
+
+ /* Combine with controller limits */
+diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
+index 654630bb7d0e..765a6f1ac1b7 100644
+--- a/drivers/scsi/sd.h
++++ b/drivers/scsi/sd.h
+@@ -151,6 +151,11 @@ static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blo
+ return blocks << (ilog2(sdev->sector_size) - 9);
+ }
+
++static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t blocks)
++{
++ return blocks * sdev->sector_size;
++}
++
+ /*
+ * A DIF-capable target device can be formatted with different
+ * protection schemes. Currently 0 through 3 are defined:
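
The sd_revalidate_disk() hunk above fixes a unit mix-up: q->limits.io_opt is in bytes while rw_max is in 512-byte sectors, and the old code assigned the byte value to both, inflating rw_max by sector_size/512. A worked example with the two helpers restated from sd.h for a power-of-two sector size:

    #include <stdio.h>

    static unsigned int logical_to_bytes(unsigned int sector_size,
                                         unsigned int blocks)
    {
        return blocks * sector_size;
    }

    static unsigned int logical_to_sectors(unsigned int sector_size,
                                           unsigned int blocks)
    {
        unsigned int shift = 0, s = sector_size;

        while (s > 512) {       /* ilog2(sector_size) - 9 */
            s >>= 1;
            shift++;
        }
        return blocks << shift;
    }

    int main(void)
    {
        unsigned int sector_size = 4096, opt_xfer_blocks = 256;

        /* io_opt is in bytes, rw_max in 512-byte sectors; mixing them
         * up (the bug being fixed) inflates rw_max eightfold here. */
        printf("io_opt = %u bytes\n",
               logical_to_bytes(sector_size, opt_xfer_blocks));   /* 1048576 */
        printf("rw_max = %u sectors\n",
               logical_to_sectors(sector_size, opt_xfer_blocks)); /* 2048 */
        return 0;
    }
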
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 7bc3778a1ac9..2a67af4e2e13 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -1680,6 +1680,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
+ case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
+ case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
+ case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
++ case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
+ break;
+ case TCM_OUT_OF_RESOURCES:
+ sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+@@ -2509,8 +2510,10 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
+ * fabric acknowledgement that requires two target_put_sess_cmd()
+ * invocations before se_cmd descriptor release.
+ */
+- if (ack_kref)
++ if (ack_kref) {
+ kref_get(&se_cmd->cmd_kref);
++ se_cmd->se_cmd_flags |= SCF_ACK_KREF;
++ }
+
+ spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+ if (se_sess->sess_tearing_down) {
+@@ -2833,6 +2836,12 @@ static const struct sense_info sense_info_table[] = {
+ .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
+ .add_sector_info = true,
+ },
++ [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
++ .key = COPY_ABORTED,
++ .asc = 0x0d,
++ .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */
++
++ },
+ [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
+ /*
+ * Returning ILLEGAL REQUEST would cause immediate IO errors on
+diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
+index 47fe94ee10b8..153a6f255b6d 100644
+--- a/drivers/target/target_core_xcopy.c
++++ b/drivers/target/target_core_xcopy.c
+@@ -104,7 +104,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
+ }
+ mutex_unlock(&g_device_mutex);
+
+- pr_err("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
++ pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
+ return -EINVAL;
+ }
+
+@@ -185,7 +185,7 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
+
+ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
+ struct xcopy_op *xop, unsigned char *p,
+- unsigned short tdll)
++ unsigned short tdll, sense_reason_t *sense_ret)
+ {
+ struct se_device *local_dev = se_cmd->se_dev;
+ unsigned char *desc = p;
+@@ -193,6 +193,8 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
+ unsigned short start = 0;
+ bool src = true;
+
++ *sense_ret = TCM_INVALID_PARAMETER_LIST;
++
+ if (offset != 0) {
+ pr_err("XCOPY target descriptor list length is not"
+ " multiple of %d\n", XCOPY_TARGET_DESC_LEN);
+@@ -243,9 +245,16 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
+ rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true);
+ else
+ rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false);
+-
+- if (rc < 0)
++ /*
++ * If a matching IEEE NAA 0x83 descriptor for the requested device
++	 * is not located on this node, return COPY_ABORTED with ASC/ASCQ
++ * 0x0d/0x02 - COPY_TARGET_DEVICE_NOT_REACHABLE to request the
++ * initiator to fall back to normal copy method.
++ */
++ if (rc < 0) {
++ *sense_ret = TCM_COPY_TARGET_DEVICE_NOT_REACHABLE;
+ goto out;
++ }
+
+ pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n",
+ xop->src_dev, &xop->src_tid_wwn[0]);
+@@ -653,6 +662,7 @@ static int target_xcopy_read_source(
+ rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
+ remote_port, true);
+ if (rc < 0) {
++ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
+ transport_generic_free_cmd(se_cmd, 0);
+ return rc;
+ }
+@@ -664,6 +674,7 @@ static int target_xcopy_read_source(
+
+ rc = target_xcopy_issue_pt_cmd(xpt_cmd);
+ if (rc < 0) {
++ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
+ transport_generic_free_cmd(se_cmd, 0);
+ return rc;
+ }
+@@ -714,6 +725,7 @@ static int target_xcopy_write_destination(
+ remote_port, false);
+ if (rc < 0) {
+ struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd;
++ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
+ /*
+ * If the failure happened before the t_mem_list hand-off in
+ * target_xcopy_setup_pt_cmd(), Reset memory + clear flag so that
+@@ -729,6 +741,7 @@ static int target_xcopy_write_destination(
+
+ rc = target_xcopy_issue_pt_cmd(xpt_cmd);
+ if (rc < 0) {
++ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
+ se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+ transport_generic_free_cmd(se_cmd, 0);
+ return rc;
+@@ -815,9 +828,14 @@ static void target_xcopy_do_work(struct work_struct *work)
+ out:
+ xcopy_pt_undepend_remotedev(xop);
+ kfree(xop);
+-
+- pr_warn("target_xcopy_do_work: Setting X-COPY CHECK_CONDITION -> sending response\n");
+- ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
++ /*
++ * Don't override an error scsi status if it has already been set
++ */
++ if (ec_cmd->scsi_status == SAM_STAT_GOOD) {
++ pr_warn_ratelimited("target_xcopy_do_work: rc: %d, Setting X-COPY"
++ " CHECK_CONDITION -> sending response\n", rc);
++ ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
++ }
+ target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
+ }
+
+@@ -875,7 +893,7 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
+ " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
+ tdll, sdll, inline_dl);
+
+- rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll);
++ rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret);
+ if (rc <= 0)
+ goto out;
+
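
target_xcopy_parse_target_descriptors() now reports the reason for a failure through an extra out-parameter: it is preset to the generic parameter-list error so every early exit stays correct, and overridden with the COPY-ABORTED reason only when device lookup fails, letting the initiator fall back to a normal copy. The shape of that pattern, with illustrative codes:

    #include <stdio.h>

    enum sense_reason {
        SENSE_INVALID_PARAM,            /* generic default */
        SENSE_COPY_TARGET_UNREACHABLE,  /* specific: initiator should fall back */
    };

    static int locate_device(int id) { return id == 42 ? 0 : -1; }

    static int parse_descriptors(int dev_id, enum sense_reason *sense_ret)
    {
        *sense_ret = SENSE_INVALID_PARAM;   /* safe for every early exit */

        if (locate_device(dev_id) < 0) {
            *sense_ret = SENSE_COPY_TARGET_UNREACHABLE;
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        enum sense_reason why;

        if (parse_descriptors(7, &why) < 0)
            printf("failed, sense=%d\n", why);  /* sense=1 */
        return 0;
    }
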
+diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
+index 95d293b7445a..dc2fcda54d53 100644
+--- a/drivers/video/fbdev/efifb.c
++++ b/drivers/video/fbdev/efifb.c
+@@ -52,9 +52,9 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
+ return 1;
+
+ if (regno < 16) {
+- red >>= 8;
+- green >>= 8;
+- blue >>= 8;
++ red >>= 16 - info->var.red.length;
++ green >>= 16 - info->var.green.length;
++ blue >>= 16 - info->var.blue.length;
+ ((u32 *)(info->pseudo_palette))[regno] =
+ (red << info->var.red.offset) |
+ (green << info->var.green.offset) |
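
The efifb fix replaces a hard-coded >> 8, which assumed 8-bit color fields, with a shift derived from the actual field width, so a 16-bit fbdev component is scaled to whatever the visual supports. Worked example for 8-bit versus 5-bit red fields:

    #include <stdint.h>
    #include <stdio.h>

    /* Scale a 16-bit fbdev color component down to an n-bit field. */
    static uint32_t scale(uint16_t comp16, unsigned int length)
    {
        return comp16 >> (16 - length);
    }

    int main(void)
    {
        uint16_t red = 0xffff;  /* full-intensity red from userspace */

        /* The old code always shifted by 8; for a 5-bit field that
         * leaves 0xff, overflowing into the neighbouring fields. */
        printf("8-bit field: 0x%02x\n", (unsigned int)scale(red, 8)); /* 0xff */
        printf("5-bit field: 0x%02x\n", (unsigned int)scale(red, 5)); /* 0x1f */
        return 0;
    }
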
+diff --git a/fs/ceph/file.c b/fs/ceph/file.c
+index 3c68e6aee2f0..c8222bfe1e56 100644
+--- a/fs/ceph/file.c
++++ b/fs/ceph/file.c
+@@ -929,7 +929,8 @@ again:
+ statret = __ceph_do_getattr(inode, page,
+ CEPH_STAT_CAP_INLINE_DATA, !!page);
+ if (statret < 0) {
+- __free_page(page);
++ if (page)
++ __free_page(page);
+ if (statret == -ENODATA) {
+ BUG_ON(retry_op != READ_INLINE);
+ goto again;
+diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
+index 50b268483302..0a3544fb50f9 100644
+--- a/fs/cifs/cifs_debug.c
++++ b/fs/cifs/cifs_debug.c
+@@ -152,6 +152,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
+ list_for_each(tmp1, &cifs_tcp_ses_list) {
+ server = list_entry(tmp1, struct TCP_Server_Info,
+ tcp_ses_list);
++ seq_printf(m, "\nNumber of credits: %d", server->credits);
+ i++;
+ list_for_each(tmp2, &server->smb_ses_list) {
+ ses = list_entry(tmp2, struct cifs_ses,
+@@ -255,7 +256,6 @@ static const struct file_operations cifs_debug_data_proc_fops = {
+ static ssize_t cifs_stats_proc_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *ppos)
+ {
+- char c;
+ bool bv;
+ int rc;
+ struct list_head *tmp1, *tmp2, *tmp3;
+@@ -263,11 +263,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
+
+- rc = get_user(c, buffer);
+- if (rc)
+- return rc;
+-
+- if (strtobool(&c, &bv) == 0) {
++ rc = kstrtobool_from_user(buffer, count, &bv);
++ if (rc == 0) {
+ #ifdef CONFIG_CIFS_STATS2
+ atomic_set(&totBufAllocCount, 0);
+ atomic_set(&totSmBufAllocCount, 0);
+@@ -290,6 +287,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
+ }
+ }
+ spin_unlock(&cifs_tcp_ses_lock);
++ } else {
++ return rc;
+ }
+
+ return count;
+@@ -433,17 +432,17 @@ static int cifsFYI_proc_open(struct inode *inode, struct file *file)
+ static ssize_t cifsFYI_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+ {
+- char c;
++ char c[2] = { '\0' };
+ bool bv;
+ int rc;
+
+- rc = get_user(c, buffer);
++ rc = get_user(c[0], buffer);
+ if (rc)
+ return rc;
+- if (strtobool(&c, &bv) == 0)
++ if (strtobool(c, &bv) == 0)
+ cifsFYI = bv;
+- else if ((c > '1') && (c <= '9'))
+- cifsFYI = (int) (c - '0'); /* see cifs_debug.h for meanings */
++ else if ((c[0] > '1') && (c[0] <= '9'))
++ cifsFYI = (int) (c[0] - '0'); /* see cifs_debug.h for meanings */
+
+ return count;
+ }
+@@ -471,20 +470,12 @@ static int cifs_linux_ext_proc_open(struct inode *inode, struct file *file)
+ static ssize_t cifs_linux_ext_proc_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *ppos)
+ {
+- char c;
+- bool bv;
+ int rc;
+
+- rc = get_user(c, buffer);
++ rc = kstrtobool_from_user(buffer, count, &linuxExtEnabled);
+ if (rc)
+ return rc;
+
+- rc = strtobool(&c, &bv);
+- if (rc)
+- return rc;
+-
+- linuxExtEnabled = bv;
+-
+ return count;
+ }
+
+@@ -511,20 +502,12 @@ static int cifs_lookup_cache_proc_open(struct inode *inode, struct file *file)
+ static ssize_t cifs_lookup_cache_proc_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *ppos)
+ {
+- char c;
+- bool bv;
+ int rc;
+
+- rc = get_user(c, buffer);
++ rc = kstrtobool_from_user(buffer, count, &lookupCacheEnabled);
+ if (rc)
+ return rc;
+
+- rc = strtobool(&c, &bv);
+- if (rc)
+- return rc;
+-
+- lookupCacheEnabled = bv;
+-
+ return count;
+ }
+
+@@ -551,20 +534,12 @@ static int traceSMB_proc_open(struct inode *inode, struct file *file)
+ static ssize_t traceSMB_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+ {
+- char c;
+- bool bv;
+ int rc;
+
+- rc = get_user(c, buffer);
++ rc = kstrtobool_from_user(buffer, count, &traceSMB);
+ if (rc)
+ return rc;
+
+- rc = strtobool(&c, &bv);
+- if (rc)
+- return rc;
+-
+- traceSMB = bv;
+-
+ return count;
+ }
+
+@@ -622,7 +597,6 @@ static ssize_t cifs_security_flags_proc_write(struct file *file,
+ int rc;
+ unsigned int flags;
+ char flags_string[12];
+- char c;
+ bool bv;
+
+ if ((count < 1) || (count > 11))
+@@ -635,11 +609,10 @@ static ssize_t cifs_security_flags_proc_write(struct file *file,
+
+ if (count < 3) {
+ /* single char or single char followed by null */
+- c = flags_string[0];
+- if (strtobool(&c, &bv) == 0) {
++ if (strtobool(flags_string, &bv) == 0) {
+ global_secflags = bv ? CIFSSEC_MAX : CIFSSEC_DEF;
+ return count;
+- } else if (!isdigit(c)) {
++ } else if (!isdigit(flags_string[0])) {
+ cifs_dbg(VFS, "Invalid SecurityFlags: %s\n",
+ flags_string);
+ return -EINVAL;
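
The recurring cleanup in this file replaces a hand-rolled get_user() + strtobool() pair, which inspected only the first byte and mis-handled inputs such as "yes\n", with kstrtobool_from_user(), which copies and parses the whole user buffer in one call. A minimal kernel-style handler in that shape; this is a context sketch, not a standalone program, and the proc wiring is omitted:

    #include <linux/fs.h>
    #include <linux/kernel.h>
    #include <linux/uaccess.h>

    static bool trace_enabled;

    /* Accepts "0"/"1"/"y"/"n" style input, trailing newline included. */
    static ssize_t trace_proc_write(struct file *file, const char __user *buffer,
                                    size_t count, loff_t *ppos)
    {
        int rc = kstrtobool_from_user(buffer, count, &trace_enabled);

        if (rc)
            return rc;
        return count;   /* consumed the whole write */
    }
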
+diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
+index 66cf0f9fff89..c611ca2339d7 100644
+--- a/fs/cifs/cifs_debug.h
++++ b/fs/cifs/cifs_debug.h
+@@ -25,7 +25,7 @@
+ void cifs_dump_mem(char *label, void *data, int length);
+ void cifs_dump_detail(void *);
+ void cifs_dump_mids(struct TCP_Server_Info *);
+-extern int traceSMB; /* flag which enables the function below */
++extern bool traceSMB; /* flag which enables the function below */
+ void dump_smb(void *, int);
+ #define CIFS_INFO 0x01
+ #define CIFS_RC 0x02
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index 450578097fb7..4f4fc9ff3636 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -54,10 +54,10 @@
+ #endif
+
+ int cifsFYI = 0;
+-int traceSMB = 0;
++bool traceSMB;
+ bool enable_oplocks = true;
+-unsigned int linuxExtEnabled = 1;
+-unsigned int lookupCacheEnabled = 1;
++bool linuxExtEnabled = true;
++bool lookupCacheEnabled = true;
+ unsigned int global_secflags = CIFSSEC_DEF;
+ /* unsigned int ntlmv2_support = 0; */
+ unsigned int sign_CIFS_PDUs = 1;
+@@ -268,7 +268,7 @@ cifs_alloc_inode(struct super_block *sb)
+ cifs_inode->createtime = 0;
+ cifs_inode->epoch = 0;
+ #ifdef CONFIG_CIFS_SMB2
+- get_random_bytes(cifs_inode->lease_key, SMB2_LEASE_KEY_SIZE);
++ generate_random_uuid(cifs_inode->lease_key);
+ #endif
+ /*
+ * Can not set i_flags here - they get immediately overwritten to zero
+@@ -1210,7 +1210,6 @@ init_cifs(void)
+ GlobalTotalActiveXid = 0;
+ GlobalMaxActiveXid = 0;
+ spin_lock_init(&cifs_tcp_ses_lock);
+- spin_lock_init(&cifs_file_list_lock);
+ spin_lock_init(&GlobalMid_Lock);
+
+ if (cifs_max_pending < 2) {
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 2b510c537a0d..c669a1471395 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -827,6 +827,7 @@ struct cifs_tcon {
+ struct list_head tcon_list;
+ int tc_count;
+ struct list_head openFileList;
++ spinlock_t open_file_lock; /* protects list above */
+ struct cifs_ses *ses; /* pointer to session associated with */
+ char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */
+ char *nativeFileSystem;
+@@ -883,7 +884,7 @@ struct cifs_tcon {
+ #endif /* CONFIG_CIFS_STATS2 */
+ __u64 bytes_read;
+ __u64 bytes_written;
+- spinlock_t stat_lock;
++ spinlock_t stat_lock; /* protects the two fields above */
+ #endif /* CONFIG_CIFS_STATS */
+ FILE_SYSTEM_DEVICE_INFO fsDevInfo;
+ FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if fs name truncated */
+@@ -1034,8 +1035,10 @@ struct cifs_fid_locks {
+ };
+
+ struct cifsFileInfo {
++ /* following two lists are protected by tcon->open_file_lock */
+ struct list_head tlist; /* pointer to next fid owned by tcon */
+ struct list_head flist; /* next fid (file instance) for this inode */
++ /* lock list below protected by cifsi->lock_sem */
+ struct cifs_fid_locks *llist; /* brlocks held by this fid */
+ kuid_t uid; /* allows finding which FileInfo structure */
+ __u32 pid; /* process id who opened file */
+@@ -1043,11 +1046,12 @@ struct cifsFileInfo {
+ /* BB add lock scope info here if needed */ ;
+ /* lock scope id (0 if none) */
+ struct dentry *dentry;
+- unsigned int f_flags;
+ struct tcon_link *tlink;
++ unsigned int f_flags;
+ bool invalidHandle:1; /* file closed via session abend */
+ bool oplock_break_cancelled:1;
+- int count; /* refcount protected by cifs_file_list_lock */
++ int count;
++ spinlock_t file_info_lock; /* protects four flag/count fields above */
+ struct mutex fh_mutex; /* prevents reopen race after dead ses*/
+ struct cifs_search_info srch_inf;
+ struct work_struct oplock_break; /* work for oplock breaks */
+@@ -1114,7 +1118,7 @@ struct cifs_writedata {
+
+ /*
+ * Take a reference on the file private data. Must be called with
+- * cifs_file_list_lock held.
++ * cfile->file_info_lock held.
+ */
+ static inline void
+ cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file)
+@@ -1508,8 +1512,10 @@ require use of the stronger protocol */
+ * GlobalMid_Lock protects:
+ * list operations on pending_mid_q and oplockQ
+ * updates to XID counters, multiplex id and SMB sequence numbers
+- * cifs_file_list_lock protects:
+- * list operations on tcp and SMB session lists and tCon lists
++ * tcp_ses_lock protects:
++ * list operations on tcp and SMB session lists
++ * tcon->open_file_lock protects the list of open files hanging off the tcon
++ * cfile->file_info_lock protects counters and fields in cifs file struct
+ * f_owner.lock protects certain per file struct operations
+ * mapping->page_lock protects certain per page operations
+ *
+@@ -1541,18 +1547,12 @@ GLOBAL_EXTERN struct list_head cifs_tcp_ses_list;
+ * tcp session, and the list of tcon's per smb session. It also protects
+ * the reference counters for the server, smb session, and tcon. Finally,
+ * changes to the tcon->tidStatus should be done while holding this lock.
++ * generally the locks should be taken in order tcp_ses_lock before
++ * tcon->open_file_lock and that before file->file_info_lock since the
++ * structure order is cifs_socket-->cifs_ses-->cifs_tcon-->cifs_file
+ */
+ GLOBAL_EXTERN spinlock_t cifs_tcp_ses_lock;
+
+-/*
+- * This lock protects the cifs_file->llist and cifs_file->flist
+- * list operations, and updates to some flags (cifs_file->invalidHandle)
+- * It will be moved to either use the tcon->stat_lock or equivalent later.
+- * If cifs_tcp_ses_lock and the lock below are both needed to be held, then
+- * the cifs_tcp_ses_lock must be grabbed first and released last.
+- */
+-GLOBAL_EXTERN spinlock_t cifs_file_list_lock;
+-
+ #ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
+ /* Outstanding dir notify requests */
+ GLOBAL_EXTERN struct list_head GlobalDnotifyReqList;
+@@ -1588,11 +1588,11 @@ GLOBAL_EXTERN atomic_t midCount;
+
+ /* Misc globals */
+ GLOBAL_EXTERN bool enable_oplocks; /* enable or disable oplocks */
+-GLOBAL_EXTERN unsigned int lookupCacheEnabled;
++GLOBAL_EXTERN bool lookupCacheEnabled;
+ GLOBAL_EXTERN unsigned int global_secflags; /* if on, session setup sent
+ with more secure ntlmssp2 challenge/resp */
+ GLOBAL_EXTERN unsigned int sign_CIFS_PDUs; /* enable smb packet signing */
+-GLOBAL_EXTERN unsigned int linuxExtEnabled;/*enable Linux/Unix CIFS extensions*/
++GLOBAL_EXTERN bool linuxExtEnabled;/*enable Linux/Unix CIFS extensions*/
+ GLOBAL_EXTERN unsigned int CIFSMaxBufSize; /* max size not including hdr */
+ GLOBAL_EXTERN unsigned int cifs_min_rcv; /* min size of big ntwrk buf pool */
+ GLOBAL_EXTERN unsigned int cifs_min_small; /* min size of small buf pool */
+diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
+index 76fcb50295a3..b1104ed8f54c 100644
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -98,13 +98,13 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
+ struct list_head *tmp1;
+
+ /* list all files open on tree connection and mark them invalid */
+- spin_lock(&cifs_file_list_lock);
++ spin_lock(&tcon->open_file_lock);
+ list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
+ open_file = list_entry(tmp, struct cifsFileInfo, tlist);
+ open_file->invalidHandle = true;
+ open_file->oplock_break_cancelled = true;
+ }
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&tcon->open_file_lock);
+ /*
+ * BB Add call to invalidate_inodes(sb) for all superblocks mounted
+ * to this tcon.
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 61c3a5ab8637..812a8cb07c63 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -2200,7 +2200,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
+ memcpy(&tcp_ses->dstaddr, &volume_info->dstaddr,
+ sizeof(tcp_ses->dstaddr));
+ #ifdef CONFIG_CIFS_SMB2
+- get_random_bytes(tcp_ses->client_guid, SMB2_CLIENT_GUID_SIZE);
++ generate_random_uuid(tcp_ses->client_guid);
+ #endif
+ /*
+ * at this point we are the only ones with the pointer
+@@ -3693,14 +3693,16 @@ remote_path_check:
+ goto mount_fail_check;
+ }
+
+- rc = cifs_are_all_path_components_accessible(server,
++ if (rc != -EREMOTE) {
++ rc = cifs_are_all_path_components_accessible(server,
+ xid, tcon, cifs_sb,
+ full_path);
+- if (rc != 0) {
+- cifs_dbg(VFS, "cannot query dirs between root and final path, "
+- "enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
+- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
+- rc = 0;
++ if (rc != 0) {
++ cifs_dbg(VFS, "cannot query dirs between root and final path, "
++ "enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
++ cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
++ rc = 0;
++ }
+ }
+ kfree(full_path);
+ }
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 0068e82217c3..72f270d4bd17 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -305,6 +305,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
+ cfile->tlink = cifs_get_tlink(tlink);
+ INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
+ mutex_init(&cfile->fh_mutex);
++ spin_lock_init(&cfile->file_info_lock);
+
+ cifs_sb_active(inode->i_sb);
+
+@@ -317,7 +318,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
+ oplock = 0;
+ }
+
+- spin_lock(&cifs_file_list_lock);
++ spin_lock(&tcon->open_file_lock);
+ if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
+ oplock = fid->pending_open->oplock;
+ list_del(&fid->pending_open->olist);
+@@ -326,12 +327,13 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
+ server->ops->set_fid(cfile, fid, oplock);
+
+ list_add(&cfile->tlist, &tcon->openFileList);
++
+ /* if readable file instance put first in list*/
+ if (file->f_mode & FMODE_READ)
+ list_add(&cfile->flist, &cinode->openFileList);
+ else
+ list_add_tail(&cfile->flist, &cinode->openFileList);
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&tcon->open_file_lock);
+
+ if (fid->purge_cache)
+ cifs_zap_mapping(inode);
+@@ -343,16 +345,16 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
+ struct cifsFileInfo *
+ cifsFileInfo_get(struct cifsFileInfo *cifs_file)
+ {
+- spin_lock(&cifs_file_list_lock);
++ spin_lock(&cifs_file->file_info_lock);
+ cifsFileInfo_get_locked(cifs_file);
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&cifs_file->file_info_lock);
+ return cifs_file;
+ }
+
+ /*
+ * Release a reference on the file private data. This may involve closing
+ * the filehandle out on the server. Must be called without holding
+- * cifs_file_list_lock.
++ * tcon->open_file_lock and cifs_file->file_info_lock.
+ */
+ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
+ {
+@@ -367,11 +369,15 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
+ struct cifs_pending_open open;
+ bool oplock_break_cancelled;
+
+- spin_lock(&cifs_file_list_lock);
++ spin_lock(&tcon->open_file_lock);
++
++ spin_lock(&cifs_file->file_info_lock);
+ if (--cifs_file->count > 0) {
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&cifs_file->file_info_lock);
++ spin_unlock(&tcon->open_file_lock);
+ return;
+ }
++ spin_unlock(&cifs_file->file_info_lock);
+
+ if (server->ops->get_lease_key)
+ server->ops->get_lease_key(inode, &fid);
+@@ -395,7 +401,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
+ set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
+ cifs_set_oplock_level(cifsi, 0);
+ }
+- spin_unlock(&cifs_file_list_lock);
++
++ spin_unlock(&tcon->open_file_lock);
+
+ oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
+
+@@ -772,10 +779,10 @@ int cifs_closedir(struct inode *inode, struct file *file)
+ server = tcon->ses->server;
+
+ cifs_dbg(FYI, "Freeing private data in close dir\n");
+- spin_lock(&cifs_file_list_lock);
++ spin_lock(&cfile->file_info_lock);
+ if (server->ops->dir_needs_close(cfile)) {
+ cfile->invalidHandle = true;
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&cfile->file_info_lock);
+ if (server->ops->close_dir)
+ rc = server->ops->close_dir(xid, tcon, &cfile->fid);
+ else
+@@ -784,7 +791,7 @@ int cifs_closedir(struct inode *inode, struct file *file)
+ /* not much we can do if it fails anyway, ignore rc */
+ rc = 0;
+ } else
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&cfile->file_info_lock);
+
+ buf = cfile->srch_inf.ntwrk_buf_start;
+ if (buf) {
+@@ -1720,12 +1727,13 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
+ {
+ struct cifsFileInfo *open_file = NULL;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
++ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+
+ /* only filter by fsuid on multiuser mounts */
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
+ fsuid_only = false;
+
+- spin_lock(&cifs_file_list_lock);
++ spin_lock(&tcon->open_file_lock);
+ /* we could simply get the first_list_entry since write-only entries
+ are always at the end of the list but since the first entry might
+ have a close pending, we go through the whole list */
+@@ -1736,8 +1744,8 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
+ if (!open_file->invalidHandle) {
+ /* found a good file */
+ /* lock it so it will not be closed on us */
+- cifsFileInfo_get_locked(open_file);
+- spin_unlock(&cifs_file_list_lock);
++ cifsFileInfo_get(open_file);
++ spin_unlock(&tcon->open_file_lock);
+ return open_file;
+ } /* else might as well continue, and look for
+ another, or simply have the caller reopen it
+@@ -1745,7 +1753,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
+ } else /* write only file */
+ break; /* write only files are last so must be done */
+ }
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&tcon->open_file_lock);
+ return NULL;
+ }
+
+@@ -1754,6 +1762,7 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
+ {
+ struct cifsFileInfo *open_file, *inv_file = NULL;
+ struct cifs_sb_info *cifs_sb;
++ struct cifs_tcon *tcon;
+ bool any_available = false;
+ int rc;
+ unsigned int refind = 0;
+@@ -1769,15 +1778,16 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
+ }
+
+ cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
++ tcon = cifs_sb_master_tcon(cifs_sb);
+
+ /* only filter by fsuid on multiuser mounts */
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
+ fsuid_only = false;
+
+- spin_lock(&cifs_file_list_lock);
++ spin_lock(&tcon->open_file_lock);
+ refind_writable:
+ if (refind > MAX_REOPEN_ATT) {
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&tcon->open_file_lock);
+ return NULL;
+ }
+ list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
+@@ -1788,8 +1798,8 @@ refind_writable:
+ if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
+ if (!open_file->invalidHandle) {
+ /* found a good writable file */
+- cifsFileInfo_get_locked(open_file);
+- spin_unlock(&cifs_file_list_lock);
++ cifsFileInfo_get(open_file);
++ spin_unlock(&tcon->open_file_lock);
+ return open_file;
+ } else {
+ if (!inv_file)
+@@ -1805,24 +1815,24 @@ refind_writable:
+
+ if (inv_file) {
+ any_available = false;
+- cifsFileInfo_get_locked(inv_file);
++ cifsFileInfo_get(inv_file);
+ }
+
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&tcon->open_file_lock);
+
+ if (inv_file) {
+ rc = cifs_reopen_file(inv_file, false);
+ if (!rc)
+ return inv_file;
+ else {
+- spin_lock(&cifs_file_list_lock);
++ spin_lock(&tcon->open_file_lock);
+ list_move_tail(&inv_file->flist,
+ &cifs_inode->openFileList);
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&tcon->open_file_lock);
+ cifsFileInfo_put(inv_file);
+- spin_lock(&cifs_file_list_lock);
+ ++refind;
+ inv_file = NULL;
++ spin_lock(&tcon->open_file_lock);
+ goto refind_writable;
+ }
+ }
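
The reworked retry path in find_writable_file() follows the usual drop-relock discipline: cifs_reopen_file() and cifsFileInfo_put() can sleep, so the spinlock is released around them, the retry counter is bumped while unlocked (note the patch moves ++refind before the relock), and the lock is retaken before jumping back to refind_writable. A compact userspace sketch of that loop, with a pthread mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stdio.h>

    #define MAX_ATTEMPTS 2

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    static int reopen(int attempt) { return attempt ? 0 : -1; } /* fails once */

    static int find_writable(void)
    {
        int attempts = 0;

        pthread_mutex_lock(&list_lock);
    retry:
        if (attempts > MAX_ATTEMPTS) {
            pthread_mutex_unlock(&list_lock);
            return -1;
        }
        /* ...scan the list under the lock; suppose the only candidate
         * has an invalidated handle and must be reopened... */
        pthread_mutex_unlock(&list_lock);   /* drop before a sleeping call */
        if (reopen(attempts) == 0)
            return 0;
        attempts++;                         /* bump while unlocked */
        pthread_mutex_lock(&list_lock);     /* retake, then retry */
        goto retry;
    }

    int main(void)
    {
        printf("find_writable() = %d\n", find_writable());
        return 0;
    }
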
+@@ -3632,15 +3642,17 @@ static int cifs_readpage(struct file *file, struct page *page)
+ static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
+ {
+ struct cifsFileInfo *open_file;
++ struct cifs_tcon *tcon =
++ cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
+
+- spin_lock(&cifs_file_list_lock);
++ spin_lock(&tcon->open_file_lock);
+ list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
+ if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&tcon->open_file_lock);
+ return 1;
+ }
+ }
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&tcon->open_file_lock);
+ return 0;
+ }
+
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
+index 8442b8b8e0be..2396ab099849 100644
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -120,6 +120,7 @@ tconInfoAlloc(void)
+ ++ret_buf->tc_count;
+ INIT_LIST_HEAD(&ret_buf->openFileList);
+ INIT_LIST_HEAD(&ret_buf->tcon_list);
++ spin_lock_init(&ret_buf->open_file_lock);
+ #ifdef CONFIG_CIFS_STATS
+ spin_lock_init(&ret_buf->stat_lock);
+ #endif
+@@ -465,7 +466,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
+ continue;
+
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
+- spin_lock(&cifs_file_list_lock);
++ spin_lock(&tcon->open_file_lock);
+ list_for_each(tmp2, &tcon->openFileList) {
+ netfile = list_entry(tmp2, struct cifsFileInfo,
+ tlist);
+@@ -495,11 +496,11 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
+ &netfile->oplock_break);
+ netfile->oplock_break_cancelled = false;
+
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&tcon->open_file_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
+ return true;
+ }
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&tcon->open_file_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
+ cifs_dbg(FYI, "No matching file for oplock break\n");
+ return true;
+@@ -613,9 +614,9 @@ backup_cred(struct cifs_sb_info *cifs_sb)
+ void
+ cifs_del_pending_open(struct cifs_pending_open *open)
+ {
+- spin_lock(&cifs_file_list_lock);
++ spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
+ list_del(&open->olist);
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
+ }
+
+ void
+@@ -635,7 +636,7 @@ void
+ cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
+ struct cifs_pending_open *open)
+ {
+- spin_lock(&cifs_file_list_lock);
++ spin_lock(&tlink_tcon(tlink)->open_file_lock);
+ cifs_add_pending_open_locked(fid, tlink, open);
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
+ }
+diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
+index b30a4a6d98a0..833e5844a2db 100644
+--- a/fs/cifs/readdir.c
++++ b/fs/cifs/readdir.c
+@@ -594,14 +594,14 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
+ is_dir_changed(file)) || (index_to_find < first_entry_in_buffer)) {
+ /* close and restart search */
+ cifs_dbg(FYI, "search backing up - close and restart search\n");
+- spin_lock(&cifs_file_list_lock);
++ spin_lock(&cfile->file_info_lock);
+ if (server->ops->dir_needs_close(cfile)) {
+ cfile->invalidHandle = true;
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&cfile->file_info_lock);
+ if (server->ops->close_dir)
+ server->ops->close_dir(xid, tcon, &cfile->fid);
+ } else
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&cfile->file_info_lock);
+ if (cfile->srch_inf.ntwrk_buf_start) {
+ cifs_dbg(FYI, "freeing SMB ff cache buf on search rewind\n");
+ if (cfile->srch_inf.smallBuf)
+diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
+index 0ffa18094335..238759c146ba 100644
+--- a/fs/cifs/smb2glob.h
++++ b/fs/cifs/smb2glob.h
+@@ -61,4 +61,14 @@
+ /* Maximum buffer size value we can send with 1 credit */
+ #define SMB2_MAX_BUFFER_SIZE 65536
+
++/*
++ * Maximum number of credits to keep available.
++ * This value is chosen somewhat arbitrarily. The Windows client
++ * defaults to 128 credits, the Windows server allows clients up to
++ * 512 credits, and the NetApp server does not limit clients at all.
++ * Choose a high enough value such that the client shouldn't limit
++ * performance.
++ */
++#define SMB2_MAX_CREDITS_AVAILABLE 32000
++
+ #endif /* _SMB2_GLOB_H */
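
SMB2_MAX_CREDITS_AVAILABLE is consumed at the end of this patch in smb2_hdr_assemble(): rather than always asking the server for 2 more credits, the client requests min(limit - held, 2) and asks for 0 once the limit is reached, keeping the credit count bounded. The arithmetic in isolation:

    #include <stdio.h>

    #define MAX_CREDITS_AVAILABLE 32000

    /* How many extra credits to request in the next SMB2 header. */
    static int credit_request(int held)
    {
        if (held >= MAX_CREDITS_AVAILABLE)
            return 0;
        return MAX_CREDITS_AVAILABLE - held < 2 ?
               MAX_CREDITS_AVAILABLE - held : 2;
    }

    int main(void)
    {
        printf("%d\n", credit_request(100));   /* 2: normal operation */
        printf("%d\n", credit_request(31999)); /* 1: one below the cap */
        printf("%d\n", credit_request(32000)); /* 0: at the cap */
        return 0;
    }
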
+diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
+index 4f0231e685a9..1238cd3552f9 100644
+--- a/fs/cifs/smb2inode.c
++++ b/fs/cifs/smb2inode.c
+@@ -266,9 +266,15 @@ smb2_set_file_info(struct inode *inode, const char *full_path,
+ struct tcon_link *tlink;
+ int rc;
+
++ if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) &&
++	    (buf->LastWriteTime == 0) && (buf->ChangeTime == 0) &&
++ (buf->Attributes == 0))
++ return 0; /* would be a no op, no sense sending this */
++
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink))
+ return PTR_ERR(tlink);
++
+ rc = smb2_open_op_close(xid, tlink_tcon(tlink), cifs_sb, full_path,
+ FILE_WRITE_ATTRIBUTES, FILE_OPEN, 0, buf,
+ SMB2_OP_SET_INFO);
+diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
+index 1c5907019045..e5bc85e49be7 100644
+--- a/fs/cifs/smb2misc.c
++++ b/fs/cifs/smb2misc.c
+@@ -525,19 +525,19 @@ smb2_is_valid_lease_break(char *buffer)
+ list_for_each(tmp1, &server->smb_ses_list) {
+ ses = list_entry(tmp1, struct cifs_ses, smb_ses_list);
+
+- spin_lock(&cifs_file_list_lock);
+ list_for_each(tmp2, &ses->tcon_list) {
+ tcon = list_entry(tmp2, struct cifs_tcon,
+ tcon_list);
++ spin_lock(&tcon->open_file_lock);
+ cifs_stats_inc(
+ &tcon->stats.cifs_stats.num_oplock_brks);
+ if (smb2_tcon_has_lease(tcon, rsp, lw)) {
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&tcon->open_file_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
+ return true;
+ }
++ spin_unlock(&tcon->open_file_lock);
+ }
+- spin_unlock(&cifs_file_list_lock);
+ }
+ }
+ spin_unlock(&cifs_tcp_ses_lock);
+@@ -579,7 +579,7 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
+ tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
+
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
+- spin_lock(&cifs_file_list_lock);
++ spin_lock(&tcon->open_file_lock);
+ list_for_each(tmp2, &tcon->openFileList) {
+ cfile = list_entry(tmp2, struct cifsFileInfo,
+ tlist);
+@@ -591,7 +591,7 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
+
+ cifs_dbg(FYI, "file id match, oplock break\n");
+ cinode = CIFS_I(d_inode(cfile->dentry));
+-
++ spin_lock(&cfile->file_info_lock);
+ if (!CIFS_CACHE_WRITE(cinode) &&
+ rsp->OplockLevel == SMB2_OPLOCK_LEVEL_NONE)
+ cfile->oplock_break_cancelled = true;
+@@ -613,14 +613,14 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
+ clear_bit(
+ CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
+ &cinode->flags);
+-
++ spin_unlock(&cfile->file_info_lock);
+ queue_work(cifsiod_wq, &cfile->oplock_break);
+
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&tcon->open_file_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
+ return true;
+ }
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&tcon->open_file_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
+ cifs_dbg(FYI, "No matching file for oplock break\n");
+ return true;
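The hunks above replace the single global cifs_file_list_lock with per-tcon and per-file locks. A toy pthread model of the resulting ordering, outermost first, as smb2_is_valid_oplock_break() now takes them (plain mutexes stand in for the kernel spinlocks):

	#include <pthread.h>

	static pthread_mutex_t cifs_tcp_ses_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t open_file_lock    = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t file_info_lock    = PTHREAD_MUTEX_INITIALIZER;

	int main(void)
	{
		pthread_mutex_lock(&cifs_tcp_ses_lock);	/* walk servers/sessions/tcons */
		pthread_mutex_lock(&open_file_lock);	/* walk tcon->openFileList */
		pthread_mutex_lock(&file_info_lock);	/* touch one cfile's state */
		pthread_mutex_unlock(&file_info_lock);
		pthread_mutex_unlock(&open_file_lock);
		pthread_mutex_unlock(&cifs_tcp_ses_lock);
		return 0;
	}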
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index dd8543caa56e..be34b4860675 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -282,7 +282,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
+ cifs_dbg(FYI, "Link Speed %lld\n",
+ le64_to_cpu(out_buf->LinkSpeed));
+ }
+-
++ kfree(out_buf);
+ return rc;
+ }
+ #endif /* STATS2 */
+@@ -536,6 +536,7 @@ smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
+ server->ops->set_oplock_level(cinode, oplock, fid->epoch,
+ &fid->purge_cache);
+ cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
++ memcpy(cfile->fid.create_guid, fid->create_guid, 16);
+ }
+
+ static void
+@@ -694,6 +695,7 @@ smb2_clone_range(const unsigned int xid,
+
+ cchunk_out:
+ kfree(pcchunk);
++ kfree(retbuf);
+ return rc;
+ }
+
+@@ -818,7 +820,6 @@ smb2_duplicate_extents(const unsigned int xid,
+ {
+ int rc;
+ unsigned int ret_data_len;
+- char *retbuf = NULL;
+ struct duplicate_extents_to_file dup_ext_buf;
+ struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);
+
+@@ -844,7 +845,7 @@ smb2_duplicate_extents(const unsigned int xid,
+ FSCTL_DUPLICATE_EXTENTS_TO_FILE,
+ true /* is_fsctl */, (char *)&dup_ext_buf,
+ sizeof(struct duplicate_extents_to_file),
+- (char **)&retbuf,
++ NULL,
+ &ret_data_len);
+
+ if (ret_data_len > 0)
+@@ -867,7 +868,6 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifsFileInfo *cfile)
+ {
+ struct fsctl_set_integrity_information_req integr_info;
+- char *retbuf = NULL;
+ unsigned int ret_data_len;
+
+ integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED);
+@@ -879,7 +879,7 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
+ FSCTL_SET_INTEGRITY_INFORMATION,
+ true /* is_fsctl */, (char *)&integr_info,
+ sizeof(struct fsctl_set_integrity_information_req),
+- (char **)&retbuf,
++ NULL,
+ &ret_data_len);
+
+ }
+@@ -1036,7 +1036,7 @@ smb2_set_lease_key(struct inode *inode, struct cifs_fid *fid)
+ static void
+ smb2_new_lease_key(struct cifs_fid *fid)
+ {
+- get_random_bytes(fid->lease_key, SMB2_LEASE_KEY_SIZE);
++ generate_random_uuid(fid->lease_key);
+ }
+
+ #define SMB2_SYMLINK_STRUCT_SIZE \
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 0b6dc1942bdc..0dbbdf5e4aee 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -103,7 +103,21 @@ smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ ,
+ hdr->ProtocolId[3] = 'B';
+ hdr->StructureSize = cpu_to_le16(64);
+ hdr->Command = smb2_cmd;
+- hdr->CreditRequest = cpu_to_le16(2); /* BB make this dynamic */
++ if (tcon && tcon->ses && tcon->ses->server) {
++ struct TCP_Server_Info *server = tcon->ses->server;
++
++ spin_lock(&server->req_lock);
++ /* Request up to 2 credits but don't go over the limit. */
++ if (server->credits >= SMB2_MAX_CREDITS_AVAILABLE)
++ hdr->CreditRequest = cpu_to_le16(0);
++ else
++ hdr->CreditRequest = cpu_to_le16(
++ min_t(int, SMB2_MAX_CREDITS_AVAILABLE -
++ server->credits, 2));
++ spin_unlock(&server->req_lock);
++ } else {
++ hdr->CreditRequest = cpu_to_le16(2);
++ }
+ hdr->ProcessId = cpu_to_le32((__u16)current->tgid);
+
+ if (!tcon)
+@@ -593,6 +607,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
+ char *security_blob = NULL;
+ unsigned char *ntlmssp_blob = NULL;
+ bool use_spnego = false; /* else use raw ntlmssp */
++ u64 previous_session = ses->Suid;
+
+ cifs_dbg(FYI, "Session Setup\n");
+
+@@ -630,6 +645,10 @@ ssetup_ntlmssp_authenticate:
+ return rc;
+
+ req->hdr.SessionId = 0; /* First session, not a reauthenticate */
++
++ /* if reconnect, we need to send previous sess id, otherwise it is 0 */
++ req->PreviousSessionId = previous_session;
++
+ req->Flags = 0; /* MBZ */
+ /* to enable echos and oplocks */
+ req->hdr.CreditRequest = cpu_to_le16(3);
+@@ -1167,7 +1186,7 @@ create_durable_v2_buf(struct cifs_fid *pfid)
+
+ buf->dcontext.Timeout = 0; /* Should this be configurable by workload */
+ buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
+- get_random_bytes(buf->dcontext.CreateGuid, 16);
++ generate_random_uuid(buf->dcontext.CreateGuid);
+ memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
+
+ /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DH2Q" */
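Both GUID call sites in this patch (the lease key in smb2ops.c above and the durable-handle CreateGuid here) move from get_random_bytes() to generate_random_uuid(), which additionally stamps the RFC 4122 version and variant bits. A rough userspace sketch of the difference (rand() standing in for the kernel entropy source):

	#include <stdio.h>
	#include <stdlib.h>

	static void sketch_random_uuid(unsigned char uuid[16])
	{
		for (int i = 0; i < 16; i++)
			uuid[i] = (unsigned char)rand();
		uuid[6] = (uuid[6] & 0x0F) | 0x40;	/* version 4 */
		uuid[8] = (uuid[8] & 0x3F) | 0x80;	/* IETF variant */
	}

	int main(void)
	{
		unsigned char guid[16];

		sketch_random_uuid(guid);
		for (int i = 0; i < 16; i++)
			printf("%02x", guid[i]);
		printf("\n");
		return 0;
	}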
+@@ -2059,6 +2078,7 @@ smb2_async_readv(struct cifs_readdata *rdata)
+ if (rdata->credits) {
+ buf->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
+ SMB2_MAX_BUFFER_SIZE));
++ buf->CreditRequest = buf->CreditCharge;
+ spin_lock(&server->req_lock);
+ server->credits += rdata->credits -
+ le16_to_cpu(buf->CreditCharge);
+@@ -2245,6 +2265,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
+ if (wdata->credits) {
+ req->hdr.CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
+ SMB2_MAX_BUFFER_SIZE));
++ req->hdr.CreditRequest = req->hdr.CreditCharge;
+ spin_lock(&server->req_lock);
+ server->credits += wdata->credits -
+ le16_to_cpu(req->hdr.CreditCharge);
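The two async hunks above make a large read or write ask the server to replenish exactly what it consumes: the charge is the byte count divided by the 64 KiB single-credit buffer size, rounded up. A standalone check of that arithmetic (DIV_ROUND_UP spelled out):

	#include <assert.h>

	#define SMB2_MAX_BUFFER_SIZE 65536

	/* Same rounding as the kernel's DIV_ROUND_UP(). */
	static unsigned int credit_charge(unsigned int bytes)
	{
		return (bytes + SMB2_MAX_BUFFER_SIZE - 1) / SMB2_MAX_BUFFER_SIZE;
	}

	int main(void)
	{
		assert(credit_charge(1) == 1);		/* tiny I/O still costs a credit */
		assert(credit_charge(65536) == 1);	/* exactly one buffer */
		assert(credit_charge(65537) == 2);	/* one byte over: two credits */
		assert(credit_charge(196608) == 3);	/* a 192 KiB read/write */
		return 0;
	}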
+diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
+index 4af52780ec35..b8f553b32dda 100644
+--- a/fs/cifs/smb2pdu.h
++++ b/fs/cifs/smb2pdu.h
+@@ -276,7 +276,7 @@ struct smb2_sess_setup_req {
+ __le32 Channel;
+ __le16 SecurityBufferOffset;
+ __le16 SecurityBufferLength;
+- __le64 PreviousSessionId;
++ __u64 PreviousSessionId;
+ __u8 Buffer[1]; /* variable length GSS security buffer */
+ } __packed;
+
+diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
+index 1420a3c614af..5d09ea585840 100644
+--- a/fs/ext4/sysfs.c
++++ b/fs/ext4/sysfs.c
+@@ -223,14 +223,18 @@ static struct attribute *ext4_attrs[] = {
+ EXT4_ATTR_FEATURE(lazy_itable_init);
+ EXT4_ATTR_FEATURE(batched_discard);
+ EXT4_ATTR_FEATURE(meta_bg_resize);
++#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ EXT4_ATTR_FEATURE(encryption);
++#endif
+ EXT4_ATTR_FEATURE(metadata_csum_seed);
+
+ static struct attribute *ext4_feat_attrs[] = {
+ ATTR_LIST(lazy_itable_init),
+ ATTR_LIST(batched_discard),
+ ATTR_LIST(meta_bg_resize),
++#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ ATTR_LIST(encryption),
++#endif
+ ATTR_LIST(metadata_csum_seed),
+ NULL,
+ };
+diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
+index d67a16f2a45d..350f67fb5b9c 100644
+--- a/fs/isofs/inode.c
++++ b/fs/isofs/inode.c
+@@ -690,6 +690,11 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
+ pri_bh = NULL;
+
+ root_found:
++ /* We don't support read-write mounts */
++ if (!(s->s_flags & MS_RDONLY)) {
++ error = -EACCES;
++ goto out_freebh;
++ }
+
+ if (joliet_level && (pri == NULL || !opt.rock)) {
+ /* This is the case of Joliet with the norock mount flag.
+@@ -1503,9 +1508,6 @@ struct inode *__isofs_iget(struct super_block *sb,
+ static struct dentry *isofs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+ {
+- /* We don't support read-write mounts */
+- if (!(flags & MS_RDONLY))
+- return ERR_PTR(-EACCES);
+ return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
+ }
+
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index ca181e81c765..fa1b8e0dcacf 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -1156,6 +1156,7 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
+ JBUFFER_TRACE(jh, "file as BJ_Reserved");
+ spin_lock(&journal->j_list_lock);
+ __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
++ spin_unlock(&journal->j_list_lock);
+ } else if (jh->b_transaction == journal->j_committing_transaction) {
+ /* first access by this transaction */
+ jh->b_modified = 0;
+@@ -1163,8 +1164,8 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
+ JBUFFER_TRACE(jh, "set next transaction");
+ spin_lock(&journal->j_list_lock);
+ jh->b_next_transaction = transaction;
++ spin_unlock(&journal->j_list_lock);
+ }
+- spin_unlock(&journal->j_list_lock);
+ jbd_unlock_bh_state(bh);
+
+ /*
+diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
+index 5166adcfc0fb..7af5eeabc80e 100644
+--- a/fs/nfs/delegation.c
++++ b/fs/nfs/delegation.c
+@@ -41,6 +41,17 @@ void nfs_mark_delegation_referenced(struct nfs_delegation *delegation)
+ set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags);
+ }
+
++static bool
++nfs4_is_valid_delegation(const struct nfs_delegation *delegation,
++ fmode_t flags)
++{
++ if (delegation != NULL && (delegation->type & flags) == flags &&
++ !test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) &&
++ !test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
++ return true;
++ return false;
++}
++
+ static int
+ nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark)
+ {
+@@ -50,8 +61,7 @@ nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark)
+ flags &= FMODE_READ|FMODE_WRITE;
+ rcu_read_lock();
+ delegation = rcu_dereference(NFS_I(inode)->delegation);
+- if (delegation != NULL && (delegation->type & flags) == flags &&
+- !test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
++ if (nfs4_is_valid_delegation(delegation, flags)) {
+ if (mark)
+ nfs_mark_delegation_referenced(delegation);
+ ret = 1;
+@@ -892,7 +902,7 @@ bool nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode,
+ flags &= FMODE_READ|FMODE_WRITE;
+ rcu_read_lock();
+ delegation = rcu_dereference(nfsi->delegation);
+- ret = (delegation != NULL && (delegation->type & flags) == flags);
++ ret = nfs4_is_valid_delegation(delegation, flags);
+ if (ret) {
+ nfs4_stateid_copy(dst, &delegation->stateid);
+ nfs_mark_delegation_referenced(delegation);
+diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
+index 6b1ce9825430..7f1a0fb8c493 100644
+--- a/fs/nfs/nfs42proc.c
++++ b/fs/nfs/nfs42proc.c
+@@ -269,6 +269,7 @@ int nfs42_proc_layoutstats_generic(struct nfs_server *server,
+ task = rpc_run_task(&task_setup);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
++ rpc_put_task(task);
+ return 0;
+ }
+
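The added rpc_put_task() plugs a reference leak: rpc_run_task() hands back a task the caller owns a reference on, and a fire-and-forget caller must drop it. A toy refcount model of the pattern (names illustrative, not the RPC API):

	#include <assert.h>

	struct task { int refs; };

	static struct task *run_task(struct task *t)  { t->refs++; return t; }	/* caller's ref */
	static void put_task(struct task *t)          { t->refs--; }
	static void task_complete(struct task *t)     { t->refs--; }		/* queue's ref */

	int main(void)
	{
		struct task t = { .refs = 1 };	/* queue holds one ref at creation */

		put_task(run_task(&t));		/* the added rpc_put_task() */
		task_complete(&t);		/* async completion drops the last ref */
		assert(t.refs == 0);		/* without put_task(): stuck at 1 */
		return 0;
	}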
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index d854693a15b0..82dc3035ea45 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1493,6 +1493,9 @@ restart:
+ __func__, status);
+ case -ENOENT:
+ case -ENOMEM:
++ case -EACCES:
++ case -EROFS:
++ case -EIO:
+ case -ESTALE:
+ /* Open state on this file cannot be recovered */
+ nfs4_state_mark_recovery_failed(state, status);
+diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
+index 9e52609cd683..ea0dd9ee138d 100644
+--- a/fs/overlayfs/copy_up.c
++++ b/fs/overlayfs/copy_up.c
+@@ -25,6 +25,7 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
+ ssize_t list_size, size, value_size = 0;
+ char *buf, *name, *value = NULL;
+ int uninitialized_var(error);
++ size_t slen;
+
+ if (!old->d_inode->i_op->getxattr ||
+ !new->d_inode->i_op->getxattr)
+@@ -47,7 +48,16 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
+ goto out;
+ }
+
+- for (name = buf; name < (buf + list_size); name += strlen(name) + 1) {
++ for (name = buf; list_size; name += slen) {
++ slen = strnlen(name, list_size) + 1;
++
++ /* underlying fs providing us with a broken xattr list? */
++ if (WARN_ON(slen > list_size)) {
++ error = -EIO;
++ break;
++ }
++ list_size -= slen;
++
+ if (ovl_is_private_xattr(name))
+ continue;
+ retry:
+diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
+index ba5ef733951f..327177df03a5 100644
+--- a/fs/overlayfs/dir.c
++++ b/fs/overlayfs/dir.c
+@@ -12,6 +12,7 @@
+ #include <linux/xattr.h>
+ #include <linux/security.h>
+ #include <linux/cred.h>
++#include <linux/atomic.h>
+ #include "overlayfs.h"
+
+ void ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
+@@ -35,8 +36,10 @@ struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry)
+ {
+ struct dentry *temp;
+ char name[20];
++ static atomic_t temp_id = ATOMIC_INIT(0);
+
+- snprintf(name, sizeof(name), "#%lx", (unsigned long) dentry);
++ /* counter is allowed to wrap, since temp dentries are ephemeral */
++ snprintf(name, sizeof(name), "#%x", atomic_inc_return(&temp_id));
+
+ temp = lookup_one_len(name, workdir, strlen(name));
+ if (!IS_ERR(temp) && temp->d_inode) {
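The new name is at most '#' plus eight hex digits plus the terminator, so it always fits the existing char name[20] buffer even once the counter wraps. A quick standalone check:

	#include <stdio.h>

	int main(void)
	{
		char name[20];
		unsigned int temp_id = 0xffffffffu;	/* counter after wrapping */

		int n = snprintf(name, sizeof(name), "#%x", temp_id);
		printf("%s (%d chars)\n", name, n);	/* "#ffffffff" (9 chars) */
		return 0;
	}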
+diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
+index 319c3a60cfa5..905caba36529 100644
+--- a/fs/pstore/ram.c
++++ b/fs/pstore/ram.c
+@@ -375,13 +375,14 @@ static void ramoops_free_przs(struct ramoops_context *cxt)
+ {
+ int i;
+
+- cxt->max_dump_cnt = 0;
+ if (!cxt->przs)
+ return;
+
+- for (i = 0; !IS_ERR_OR_NULL(cxt->przs[i]); i++)
++ for (i = 0; i < cxt->max_dump_cnt; i++)
+ persistent_ram_free(cxt->przs[i]);
++
+ kfree(cxt->przs);
++ cxt->max_dump_cnt = 0;
+ }
+
+ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
+@@ -406,7 +407,7 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
+ GFP_KERNEL);
+ if (!cxt->przs) {
+ dev_err(dev, "failed to initialize a prz array for dumps\n");
+- goto fail_prz;
++ goto fail_mem;
+ }
+
+ for (i = 0; i < cxt->max_dump_cnt; i++) {
+@@ -417,6 +418,11 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
+ err = PTR_ERR(cxt->przs[i]);
+ dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n",
+ cxt->record_size, (unsigned long long)*paddr, err);
++
++ while (i > 0) {
++ i--;
++ persistent_ram_free(cxt->przs[i]);
++ }
+ goto fail_prz;
+ }
+ *paddr += cxt->record_size;
+@@ -424,7 +430,9 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
+
+ return 0;
+ fail_prz:
+- ramoops_free_przs(cxt);
++ kfree(cxt->przs);
++fail_mem:
++ cxt->max_dump_cnt = 0;
+ return err;
+ }
+
+@@ -583,7 +591,6 @@ static int ramoops_remove(struct platform_device *pdev)
+ struct ramoops_context *cxt = &oops_cxt;
+
+ pstore_unregister(&cxt->pstore);
+- cxt->max_dump_cnt = 0;
+
+ kfree(cxt->pstore.buf);
+ cxt->pstore.bufsize = 0;
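The reworked error path frees only the i zones that were actually created, then the array, and clears max_dump_cnt last so nothing can index freed memory. A userspace model of that unwind (labels mirror the patch; the allocation details are invented):

	#include <stdlib.h>

	static void **alloc_zones(int want, int fail_at, int *count)
	{
		void **zones = calloc(want, sizeof(*zones));
		int i;

		if (!zones)
			goto fail_mem;

		for (i = 0; i < want; i++) {
			zones[i] = (i == fail_at) ? NULL : malloc(16);
			if (!zones[i]) {
				while (i > 0)		/* undo partial progress */
					free(zones[--i]);
				goto fail_prz;
			}
		}
		*count = want;
		return zones;

	fail_prz:
		free(zones);
	fail_mem:
		*count = 0;
		return NULL;
	}

	int main(void)
	{
		int count;
		void **z = alloc_zones(4, 2, &count);	/* force failure at zone 2 */

		return (z == NULL && count == 0) ? 0 : 1;
	}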
+diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
+index 76c3f80efdfa..364d2dffe5a6 100644
+--- a/fs/pstore/ram_core.c
++++ b/fs/pstore/ram_core.c
+@@ -47,43 +47,10 @@ static inline size_t buffer_start(struct persistent_ram_zone *prz)
+ return atomic_read(&prz->buffer->start);
+ }
+
+-/* increase and wrap the start pointer, returning the old value */
+-static size_t buffer_start_add_atomic(struct persistent_ram_zone *prz, size_t a)
+-{
+- int old;
+- int new;
+-
+- do {
+- old = atomic_read(&prz->buffer->start);
+- new = old + a;
+- while (unlikely(new >= prz->buffer_size))
+- new -= prz->buffer_size;
+- } while (atomic_cmpxchg(&prz->buffer->start, old, new) != old);
+-
+- return old;
+-}
+-
+-/* increase the size counter until it hits the max size */
+-static void buffer_size_add_atomic(struct persistent_ram_zone *prz, size_t a)
+-{
+- size_t old;
+- size_t new;
+-
+- if (atomic_read(&prz->buffer->size) == prz->buffer_size)
+- return;
+-
+- do {
+- old = atomic_read(&prz->buffer->size);
+- new = old + a;
+- if (new > prz->buffer_size)
+- new = prz->buffer_size;
+- } while (atomic_cmpxchg(&prz->buffer->size, old, new) != old);
+-}
+-
+ static DEFINE_RAW_SPINLOCK(buffer_lock);
+
+ /* increase and wrap the start pointer, returning the old value */
+-static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a)
++static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
+ {
+ int old;
+ int new;
+@@ -103,7 +70,7 @@ static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a)
+ }
+
+ /* increase the size counter until it hits the max size */
+-static void buffer_size_add_locked(struct persistent_ram_zone *prz, size_t a)
++static void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
+ {
+ size_t old;
+ size_t new;
+@@ -124,9 +91,6 @@ exit:
+ raw_spin_unlock_irqrestore(&buffer_lock, flags);
+ }
+
+-static size_t (*buffer_start_add)(struct persistent_ram_zone *, size_t) = buffer_start_add_atomic;
+-static void (*buffer_size_add)(struct persistent_ram_zone *, size_t) = buffer_size_add_atomic;
+-
+ static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
+ uint8_t *data, size_t len, uint8_t *ecc)
+ {
+@@ -299,7 +263,7 @@ static void notrace persistent_ram_update(struct persistent_ram_zone *prz,
+ const void *s, unsigned int start, unsigned int count)
+ {
+ struct persistent_ram_buffer *buffer = prz->buffer;
+- memcpy(buffer->data + start, s, count);
++ memcpy_toio(buffer->data + start, s, count);
+ persistent_ram_update_ecc(prz, start, count);
+ }
+
+@@ -322,8 +286,8 @@ void persistent_ram_save_old(struct persistent_ram_zone *prz)
+ }
+
+ prz->old_log_size = size;
+- memcpy(prz->old_log, &buffer->data[start], size - start);
+- memcpy(prz->old_log + size - start, &buffer->data[0], start);
++ memcpy_fromio(prz->old_log, &buffer->data[start], size - start);
++ memcpy_fromio(prz->old_log + size - start, &buffer->data[0], start);
+ }
+
+ int notrace persistent_ram_write(struct persistent_ram_zone *prz,
+@@ -426,9 +390,6 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size,
+ return NULL;
+ }
+
+- buffer_start_add = buffer_start_add_locked;
+- buffer_size_add = buffer_size_add_locked;
+-
+ if (memtype)
+ va = ioremap(start, size);
+ else
+diff --git a/fs/super.c b/fs/super.c
+index f5f4b328f860..d4d2591b77c8 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -1326,8 +1326,8 @@ int freeze_super(struct super_block *sb)
+ }
+ }
+ /*
+- * This is just for debugging purposes so that fs can warn if it
+- * sees write activity when frozen is set to SB_FREEZE_COMPLETE.
++ * For debugging purposes so that fs can warn if it sees write activity
++ * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
+ */
+ sb->s_writers.frozen = SB_FREEZE_COMPLETE;
+ up_write(&sb->s_umount);
+@@ -1346,7 +1346,7 @@ int thaw_super(struct super_block *sb)
+ int error;
+
+ down_write(&sb->s_umount);
+- if (sb->s_writers.frozen == SB_UNFROZEN) {
++ if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) {
+ up_write(&sb->s_umount);
+ return -EINVAL;
+ }
+diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
+index e8b01b721e99..b5bf23b34241 100644
+--- a/fs/ubifs/xattr.c
++++ b/fs/ubifs/xattr.c
+@@ -173,6 +173,7 @@ out_cancel:
+ host_ui->xattr_cnt -= 1;
+ host_ui->xattr_size -= CALC_DENT_SIZE(nm->len);
+ host_ui->xattr_size -= CALC_XATTR_BYTES(size);
++ host_ui->xattr_names -= nm->len;
+ mutex_unlock(&host_ui->ui_mutex);
+ out_free:
+ make_bad_inode(inode);
+@@ -533,6 +534,7 @@ out_cancel:
+ host_ui->xattr_cnt += 1;
+ host_ui->xattr_size += CALC_DENT_SIZE(nm->len);
+ host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len);
++ host_ui->xattr_names += nm->len;
+ mutex_unlock(&host_ui->ui_mutex);
+ ubifs_release_budget(c, &req);
+ make_bad_inode(inode);
+diff --git a/include/linux/devfreq-event.h b/include/linux/devfreq-event.h
+index 0a83a1e648b0..4db00b02ca3f 100644
+--- a/include/linux/devfreq-event.h
++++ b/include/linux/devfreq-event.h
+@@ -148,11 +148,6 @@ static inline int devfreq_event_reset_event(struct devfreq_event_dev *edev)
+ return -EINVAL;
+ }
+
+-static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev)
+-{
+- return ERR_PTR(-EINVAL);
+-}
+-
+ static inline struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(
+ struct device *dev, int index)
+ {
+diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
+index e98425058f20..54048f336a1f 100644
+--- a/include/linux/irqchip/arm-gic-v3.h
++++ b/include/linux/irqchip/arm-gic-v3.h
+@@ -218,7 +218,7 @@
+ #define GITS_BASER_TYPE_SHIFT (56)
+ #define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
+ #define GITS_BASER_ENTRY_SIZE_SHIFT (48)
+-#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1)
++#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
+ #define GITS_BASER_NonShareable (0UL << 10)
+ #define GITS_BASER_InnerShareable (1UL << 10)
+ #define GITS_BASER_OuterShareable (2UL << 10)
+diff --git a/include/linux/kernel.h b/include/linux/kernel.h
+index e571e592e53a..50220cab738c 100644
+--- a/include/linux/kernel.h
++++ b/include/linux/kernel.h
+@@ -356,6 +356,7 @@ int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
+ int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
+ int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
+ int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
++int __must_check kstrtobool(const char *s, bool *res);
+
+ int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
+ int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
+@@ -367,6 +368,7 @@ int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigne
+ int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
+ int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
+ int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
++int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res);
+
+ static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
+ {
+diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
+index f09648d14694..782d4e814e21 100644
+--- a/include/linux/lightnvm.h
++++ b/include/linux/lightnvm.h
+@@ -1,6 +1,8 @@
+ #ifndef NVM_H
+ #define NVM_H
+
++#include <linux/types.h>
++
+ enum {
+ NVM_IO_OK = 0,
+ NVM_IO_REQUEUE = 1,
+@@ -11,10 +13,71 @@ enum {
+ NVM_IOTYPE_GC = 1,
+ };
+
++#define NVM_BLK_BITS (16)
++#define NVM_PG_BITS (16)
++#define NVM_SEC_BITS (8)
++#define NVM_PL_BITS (8)
++#define NVM_LUN_BITS (8)
++#define NVM_CH_BITS (8)
++
++struct ppa_addr {
++ /* Generic structure for all addresses */
++ union {
++ struct {
++ u64 blk : NVM_BLK_BITS;
++ u64 pg : NVM_PG_BITS;
++ u64 sec : NVM_SEC_BITS;
++ u64 pl : NVM_PL_BITS;
++ u64 lun : NVM_LUN_BITS;
++ u64 ch : NVM_CH_BITS;
++ } g;
++
++ u64 ppa;
++ };
++};
++
++struct nvm_rq;
++struct nvm_id;
++struct nvm_dev;
++
++typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
++typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
++typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
++typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
++ nvm_l2p_update_fn *, void *);
++typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int,
++ nvm_bb_update_fn *, void *);
++typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int);
++typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
++typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
++typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
++typedef void (nvm_destroy_dma_pool_fn)(void *);
++typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
++ dma_addr_t *);
++typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
++
++struct nvm_dev_ops {
++ nvm_id_fn *identity;
++ nvm_get_l2p_tbl_fn *get_l2p_tbl;
++ nvm_op_bb_tbl_fn *get_bb_tbl;
++ nvm_op_set_bb_fn *set_bb_tbl;
++
++ nvm_submit_io_fn *submit_io;
++ nvm_erase_blk_fn *erase_block;
++
++ nvm_create_dma_pool_fn *create_dma_pool;
++ nvm_destroy_dma_pool_fn *destroy_dma_pool;
++ nvm_dev_dma_alloc_fn *dev_dma_alloc;
++ nvm_dev_dma_free_fn *dev_dma_free;
++
++ unsigned int max_phys_sect;
++};
++
++
++
+ #ifdef CONFIG_NVM
+
+ #include <linux/blkdev.h>
+-#include <linux/types.h>
+ #include <linux/file.h>
+ #include <linux/dmapool.h>
+
+@@ -126,29 +189,6 @@ struct nvm_tgt_instance {
+ #define NVM_VERSION_MINOR 0
+ #define NVM_VERSION_PATCH 0
+
+-#define NVM_BLK_BITS (16)
+-#define NVM_PG_BITS (16)
+-#define NVM_SEC_BITS (8)
+-#define NVM_PL_BITS (8)
+-#define NVM_LUN_BITS (8)
+-#define NVM_CH_BITS (8)
+-
+-struct ppa_addr {
+- /* Generic structure for all addresses */
+- union {
+- struct {
+- u64 blk : NVM_BLK_BITS;
+- u64 pg : NVM_PG_BITS;
+- u64 sec : NVM_SEC_BITS;
+- u64 pl : NVM_PL_BITS;
+- u64 lun : NVM_LUN_BITS;
+- u64 ch : NVM_CH_BITS;
+- } g;
+-
+- u64 ppa;
+- };
+-};
+-
+ struct nvm_rq {
+ struct nvm_tgt_instance *ins;
+ struct nvm_dev *dev;
+@@ -182,39 +222,6 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
+
+ struct nvm_block;
+
+-typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
+-typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
+-typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
+-typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
+- nvm_l2p_update_fn *, void *);
+-typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int,
+- nvm_bb_update_fn *, void *);
+-typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int);
+-typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
+-typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
+-typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
+-typedef void (nvm_destroy_dma_pool_fn)(void *);
+-typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
+- dma_addr_t *);
+-typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
+-
+-struct nvm_dev_ops {
+- nvm_id_fn *identity;
+- nvm_get_l2p_tbl_fn *get_l2p_tbl;
+- nvm_op_bb_tbl_fn *get_bb_tbl;
+- nvm_op_set_bb_fn *set_bb_tbl;
+-
+- nvm_submit_io_fn *submit_io;
+- nvm_erase_blk_fn *erase_block;
+-
+- nvm_create_dma_pool_fn *create_dma_pool;
+- nvm_destroy_dma_pool_fn *destroy_dma_pool;
+- nvm_dev_dma_alloc_fn *dev_dma_alloc;
+- nvm_dev_dma_free_fn *dev_dma_free;
+-
+- unsigned int max_phys_sect;
+-};
+-
+ struct nvm_lun {
+ int id;
+
+diff --git a/include/linux/sem.h b/include/linux/sem.h
+index 976ce3a19f1b..d0efd6e6c20a 100644
+--- a/include/linux/sem.h
++++ b/include/linux/sem.h
+@@ -21,6 +21,7 @@ struct sem_array {
+ struct list_head list_id; /* undo requests on this array */
+ int sem_nsems; /* no. of semaphores in array */
+ int complex_count; /* pending complex operations */
++ bool complex_mode; /* no parallel simple ops */
+ };
+
+ #ifdef CONFIG_SYSVIPC
+diff --git a/include/linux/string.h b/include/linux/string.h
+index 9ef7795e65e4..aa30789b0f65 100644
+--- a/include/linux/string.h
++++ b/include/linux/string.h
+@@ -127,7 +127,11 @@ extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
+ extern void argv_free(char **argv);
+
+ extern bool sysfs_streq(const char *s1, const char *s2);
+-extern int strtobool(const char *s, bool *res);
++extern int kstrtobool(const char *s, bool *res);
++static inline int strtobool(const char *s, bool *res)
++{
++ return kstrtobool(s, res);
++}
+
+ #ifdef CONFIG_BINARY_PRINTF
+ int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
+diff --git a/include/linux/swap.h b/include/linux/swap.h
+index 7ba7dccaf0e7..d8ca2eaa3a8b 100644
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -266,6 +266,7 @@ static inline void workingset_node_pages_inc(struct radix_tree_node *node)
+
+ static inline void workingset_node_pages_dec(struct radix_tree_node *node)
+ {
++ VM_WARN_ON_ONCE(!workingset_node_pages(node));
+ node->count--;
+ }
+
+@@ -281,6 +282,7 @@ static inline void workingset_node_shadows_inc(struct radix_tree_node *node)
+
+ static inline void workingset_node_shadows_dec(struct radix_tree_node *node)
+ {
++ VM_WARN_ON_ONCE(!workingset_node_shadows(node));
+ node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
+ }
+
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index 59081c73b296..6afc6f388edf 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -180,6 +180,7 @@ enum tcm_sense_reason_table {
+ TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED = R(0x15),
+ TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED = R(0x16),
+ TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED = R(0x17),
++ TCM_COPY_TARGET_DEVICE_NOT_REACHABLE = R(0x18),
+ #undef R
+ };
+
+diff --git a/ipc/sem.c b/ipc/sem.c
+index 20d07008ad5e..9862c3d1c26d 100644
+--- a/ipc/sem.c
++++ b/ipc/sem.c
+@@ -155,14 +155,21 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
+
+ /*
+ * Locking:
++ * a) global sem_lock() for read/write
+ * sem_undo.id_next,
+ * sem_array.complex_count,
+- * sem_array.pending{_alter,_cont},
+- * sem_array.sem_undo: global sem_lock() for read/write
+- * sem_undo.proc_next: only "current" is allowed to read/write that field.
++ * sem_array.complex_mode
++ * sem_array.pending{_alter,_const},
++ * sem_array.sem_undo
+ *
++ * b) global or semaphore sem_lock() for read/write:
+ * sem_array.sem_base[i].pending_{const,alter}:
+- * global or semaphore sem_lock() for read/write
++ * sem_array.complex_mode (for read)
++ *
++ * c) special:
++ * sem_undo_list.list_proc:
++ * * undo_list->lock for write
++ * * rcu for read
+ */
+
+ #define sc_semmsl sem_ctls[0]
+@@ -263,24 +270,25 @@ static void sem_rcu_free(struct rcu_head *head)
+ #define ipc_smp_acquire__after_spin_is_unlocked() smp_rmb()
+
+ /*
+- * Wait until all currently ongoing simple ops have completed.
++ * Enter the mode suitable for non-simple operations:
+ * Caller must own sem_perm.lock.
+- * New simple ops cannot start, because simple ops first check
+- * that sem_perm.lock is free.
+- * that a) sem_perm.lock is free and b) complex_count is 0.
+ */
+-static void sem_wait_array(struct sem_array *sma)
++static void complexmode_enter(struct sem_array *sma)
+ {
+ int i;
+ struct sem *sem;
+
+- if (sma->complex_count) {
+- /* The thread that increased sma->complex_count waited on
+- * all sem->lock locks. Thus we don't need to wait again.
+- */
++ if (sma->complex_mode) {
++ /* We are already in complex_mode. Nothing to do */
+ return;
+ }
+
++ /* We need a full barrier after setting complex_mode:
++ * The write to complex_mode must be visible
++ * before we read the first sem->lock spinlock state.
++ */
++ smp_store_mb(sma->complex_mode, true);
++
+ for (i = 0; i < sma->sem_nsems; i++) {
+ sem = sma->sem_base + i;
+ spin_unlock_wait(&sem->lock);
+@@ -289,6 +297,28 @@ static void sem_wait_array(struct sem_array *sma)
+ }
+
+ /*
++ * Try to leave the mode that disallows simple operations:
++ * Caller must own sem_perm.lock.
++ */
++static void complexmode_tryleave(struct sem_array *sma)
++{
++ if (sma->complex_count) {
++ /* Complex ops are sleeping.
++ * We must stay in complex mode
++ */
++ return;
++ }
++ /*
++ * Immediately after setting complex_mode to false,
++ * a simple op can start. Thus: all memory writes
++ * performed by the current operation must be visible
++ * before we set complex_mode to false.
++ */
++ smp_store_release(&sma->complex_mode, false);
++}
++
++#define SEM_GLOBAL_LOCK (-1)
++/*
+ * If the request contains only one semaphore operation, and there are
+ * no complex transactions pending, lock only the semaphore involved.
+ * Otherwise, lock the entire semaphore array, since we either have
+@@ -304,56 +334,42 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ /* Complex operation - acquire a full lock */
+ ipc_lock_object(&sma->sem_perm);
+
+- /* And wait until all simple ops that are processed
+- * right now have dropped their locks.
+- */
+- sem_wait_array(sma);
+- return -1;
++ /* Prevent parallel simple ops */
++ complexmode_enter(sma);
++ return SEM_GLOBAL_LOCK;
+ }
+
+ /*
+ * Only one semaphore affected - try to optimize locking.
+- * The rules are:
+- * - optimized locking is possible if no complex operation
+- * is either enqueued or processed right now.
+- * - The test for enqueued complex ops is simple:
+- * sma->complex_count != 0
+- * - Testing for complex ops that are processed right now is
+- * a bit more difficult. Complex ops acquire the full lock
+- * and first wait that the running simple ops have completed.
+- * (see above)
+- * Thus: If we own a simple lock and the global lock is free
+- * and complex_count is now 0, then it will stay 0 and
+- * thus just locking sem->lock is sufficient.
++ * Optimized locking is possible if no complex operation
++ * is either enqueued or processed right now.
++ *
++ * Both facts are tracked by complex_mode.
+ */
+ sem = sma->sem_base + sops->sem_num;
+
+- if (sma->complex_count == 0) {
++ /*
++ * Initial check for complex_mode. Just an optimization,
++ * no locking, no memory barrier.
++ */
++ if (!sma->complex_mode) {
+ /*
+ * It appears that no complex operation is around.
+ * Acquire the per-semaphore lock.
+ */
+ spin_lock(&sem->lock);
+
+- /* Then check that the global lock is free */
+- if (!spin_is_locked(&sma->sem_perm.lock)) {
+- /*
+- * We need a memory barrier with acquire semantics,
+- * otherwise we can race with another thread that does:
+- * complex_count++;
+- * spin_unlock(sem_perm.lock);
+- */
+- ipc_smp_acquire__after_spin_is_unlocked();
++ /*
++ * See 51d7d5205d33
++ * ("powerpc: Add smp_mb() to arch_spin_is_locked()"):
++ * A full barrier is required: the write of sem->lock
++ * must be visible before the read is executed
++ */
++ smp_mb();
+
+- /*
+- * Now repeat the test of complex_count:
+- * It can't change anymore until we drop sem->lock.
+- * Thus: if is now 0, then it will stay 0.
+- */
+- if (sma->complex_count == 0) {
+- /* fast path successful! */
+- return sops->sem_num;
+- }
++ if (!smp_load_acquire(&sma->complex_mode)) {
++ /* fast path successful! */
++ return sops->sem_num;
+ }
+ spin_unlock(&sem->lock);
+ }
+@@ -373,15 +389,16 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ /* Not a false alarm, thus complete the sequence for a
+ * full lock.
+ */
+- sem_wait_array(sma);
+- return -1;
++ complexmode_enter(sma);
++ return SEM_GLOBAL_LOCK;
+ }
+ }
+
+ static inline void sem_unlock(struct sem_array *sma, int locknum)
+ {
+- if (locknum == -1) {
++ if (locknum == SEM_GLOBAL_LOCK) {
+ unmerge_queues(sma);
++ complexmode_tryleave(sma);
+ ipc_unlock_object(&sma->sem_perm);
+ } else {
+ struct sem *sem = sma->sem_base + locknum;
+@@ -533,6 +550,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
+ }
+
+ sma->complex_count = 0;
++ sma->complex_mode = true; /* dropped by sem_unlock below */
+ INIT_LIST_HEAD(&sma->pending_alter);
+ INIT_LIST_HEAD(&sma->pending_const);
+ INIT_LIST_HEAD(&sma->list_id);
+@@ -2186,10 +2204,10 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
+ /*
+ * The proc interface isn't aware of sem_lock(), it calls
+ * ipc_lock_object() directly (in sysvipc_find_ipc).
+- * In order to stay compatible with sem_lock(), we must wait until
+- * all simple semop() calls have left their critical regions.
++ * In order to stay compatible with sem_lock(), we must
++ * enter / leave complex_mode.
+ */
+- sem_wait_array(sma);
++ complexmode_enter(sma);
+
+ sem_otime = get_semotime(sma);
+
+@@ -2206,6 +2224,8 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
+ sem_otime,
+ sma->sem_ctime);
+
++ complexmode_tryleave(sma);
++
+ return 0;
+ }
+ #endif
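The complex_mode handshake above pairs a store-plus-full-barrier on entry with a release store on exit and an acquire load on the simple-op fast path. A compilable userspace model of that pairing (C11 atomics standing in for the kernel barrier primitives; the sem_array state is reduced to two flags):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_bool complex_mode;
	static atomic_bool sem_lock_held;	/* stands in for one sem->lock */

	static void complexmode_enter(void)
	{
		/* smp_store_mb(): store, then full barrier before reading locks */
		atomic_store(&complex_mode, true);
		atomic_thread_fence(memory_order_seq_cst);
		while (atomic_load(&sem_lock_held))	/* spin_unlock_wait() */
			;
	}

	static void complexmode_tryleave(void)
	{
		/* smp_store_release(): prior writes visible before the flag drops */
		atomic_store_explicit(&complex_mode, false, memory_order_release);
	}

	static bool simple_op_fast_path(void)
	{
		atomic_store(&sem_lock_held, true);		/* spin_lock(&sem->lock) */
		atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
		if (!atomic_load_explicit(&complex_mode, memory_order_acquire))
			return true;			/* fast path: keep sem->lock */
		atomic_store(&sem_lock_held, false);	/* fall back to global lock */
		return false;
	}

	int main(void)
	{
		complexmode_enter();
		complexmode_tryleave();
		return simple_op_fast_path() ? 0 : 1;
	}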
+diff --git a/lib/kstrtox.c b/lib/kstrtox.c
+index 94be244e8441..d8a5cf66c316 100644
+--- a/lib/kstrtox.c
++++ b/lib/kstrtox.c
+@@ -321,6 +321,70 @@ int kstrtos8(const char *s, unsigned int base, s8 *res)
+ }
+ EXPORT_SYMBOL(kstrtos8);
+
++/**
++ * kstrtobool - convert common user inputs into boolean values
++ * @s: input string
++ * @res: result
++ *
++ * This routine returns 0 iff the first character is one of 'Yy1Nn0', or
++ * [oO][NnFf] for "on" and "off". Otherwise it will return -EINVAL. Value
++ * pointed to by res is updated upon finding a match.
++ */
++int kstrtobool(const char *s, bool *res)
++{
++ if (!s)
++ return -EINVAL;
++
++ switch (s[0]) {
++ case 'y':
++ case 'Y':
++ case '1':
++ *res = true;
++ return 0;
++ case 'n':
++ case 'N':
++ case '0':
++ *res = false;
++ return 0;
++ case 'o':
++ case 'O':
++ switch (s[1]) {
++ case 'n':
++ case 'N':
++ *res = true;
++ return 0;
++ case 'f':
++ case 'F':
++ *res = false;
++ return 0;
++ default:
++ break;
++ }
++ default:
++ break;
++ }
++
++ return -EINVAL;
++}
++EXPORT_SYMBOL(kstrtobool);
++
++/*
++ * Since "base" would be a nonsense argument, this open-codes the
++ * _from_user helper instead of using the helper macro below.
++ */
++int kstrtobool_from_user(const char __user *s, size_t count, bool *res)
++{
++ /* Longest string needed to differentiate, newline, terminator */
++ char buf[4];
++
++ count = min(count, sizeof(buf) - 1);
++ if (copy_from_user(buf, s, count))
++ return -EFAULT;
++ buf[count] = '\0';
++ return kstrtobool(buf, res);
++}
++EXPORT_SYMBOL(kstrtobool_from_user);
++
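A quick userspace exercise of the parser added above (the function body copied out of the hunk, with EXPORT_SYMBOL dropped), showing which spellings it accepts:

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	static int kstrtobool(const char *s, bool *res)
	{
		if (!s)
			return -EINVAL;

		switch (s[0]) {
		case 'y': case 'Y': case '1':
			*res = true;
			return 0;
		case 'n': case 'N': case '0':
			*res = false;
			return 0;
		case 'o': case 'O':
			switch (s[1]) {
			case 'n': case 'N':
				*res = true;
				return 0;
			case 'f': case 'F':
				*res = false;
				return 0;
			default:
				break;
			}
		default:
			break;
		}
		return -EINVAL;
	}

	int main(void)
	{
		const char *inputs[] = { "y", "on\n", "Off", "1", "maybe" };
		bool v;

		for (int i = 0; i < 5; i++) {
			int err = kstrtobool(inputs[i], &v);
			printf("%-6s -> %s\n", inputs[i],
			       err ? "-EINVAL" : (v ? "true" : "false"));
		}
		return 0;
	}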
+ #define kstrto_from_user(f, g, type) \
+ int f(const char __user *s, size_t count, unsigned int base, type *res) \
+ { \
+diff --git a/lib/string.c b/lib/string.c
+index 0323c0d5629a..1a90db9bc6e1 100644
+--- a/lib/string.c
++++ b/lib/string.c
+@@ -630,35 +630,6 @@ bool sysfs_streq(const char *s1, const char *s2)
+ }
+ EXPORT_SYMBOL(sysfs_streq);
+
+-/**
+- * strtobool - convert common user inputs into boolean values
+- * @s: input string
+- * @res: result
+- *
+- * This routine returns 0 iff the first character is one of 'Yy1Nn0'.
+- * Otherwise it will return -EINVAL. Value pointed to by res is
+- * updated upon finding a match.
+- */
+-int strtobool(const char *s, bool *res)
+-{
+- switch (s[0]) {
+- case 'y':
+- case 'Y':
+- case '1':
+- *res = true;
+- break;
+- case 'n':
+- case 'N':
+- case '0':
+- *res = false;
+- break;
+- default:
+- return -EINVAL;
+- }
+- return 0;
+-}
+-EXPORT_SYMBOL(strtobool);
+-
+ #ifndef __HAVE_ARCH_MEMSET
+ /**
+ * memset - Fill a region of memory with the given value
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 1bb007624b53..c588d1222b2a 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -109,6 +109,48 @@
+ * ->tasklist_lock (memory_failure, collect_procs_ao)
+ */
+
++static int page_cache_tree_insert(struct address_space *mapping,
++ struct page *page, void **shadowp)
++{
++ struct radix_tree_node *node;
++ void **slot;
++ int error;
++
++ error = __radix_tree_create(&mapping->page_tree, page->index,
++ &node, &slot);
++ if (error)
++ return error;
++ if (*slot) {
++ void *p;
++
++ p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
++ if (!radix_tree_exceptional_entry(p))
++ return -EEXIST;
++ if (shadowp)
++ *shadowp = p;
++ mapping->nrshadows--;
++ if (node)
++ workingset_node_shadows_dec(node);
++ }
++ radix_tree_replace_slot(slot, page);
++ mapping->nrpages++;
++ if (node) {
++ workingset_node_pages_inc(node);
++ /*
++ * Don't track node that contains actual pages.
++ *
++ * Avoid acquiring the list_lru lock if already
++ * untracked. The list_empty() test is safe as
++ * node->private_list is protected by
++ * mapping->tree_lock.
++ */
++ if (!list_empty(&node->private_list))
++ list_lru_del(&workingset_shadow_nodes,
++ &node->private_list);
++ }
++ return 0;
++}
++
+ static void page_cache_tree_delete(struct address_space *mapping,
+ struct page *page, void *shadow)
+ {
+@@ -122,6 +164,14 @@ static void page_cache_tree_delete(struct address_space *mapping,
+
+ __radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot);
+
++ if (!node) {
++ /*
++ * We need a node to properly account shadow
++ * entries. Don't plant any without. XXX
++ */
++ shadow = NULL;
++ }
++
+ if (shadow) {
+ mapping->nrshadows++;
+ /*
+@@ -538,9 +588,8 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
+ memcg = mem_cgroup_begin_page_stat(old);
+ spin_lock_irqsave(&mapping->tree_lock, flags);
+ __delete_from_page_cache(old, NULL, memcg);
+- error = radix_tree_insert(&mapping->page_tree, offset, new);
++ error = page_cache_tree_insert(mapping, new, NULL);
+ BUG_ON(error);
+- mapping->nrpages++;
+
+ /*
+ * hugetlb pages do not participate in page cache accounting.
+@@ -562,48 +611,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
+ }
+ EXPORT_SYMBOL_GPL(replace_page_cache_page);
+
+-static int page_cache_tree_insert(struct address_space *mapping,
+- struct page *page, void **shadowp)
+-{
+- struct radix_tree_node *node;
+- void **slot;
+- int error;
+-
+- error = __radix_tree_create(&mapping->page_tree, page->index,
+- &node, &slot);
+- if (error)
+- return error;
+- if (*slot) {
+- void *p;
+-
+- p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
+- if (!radix_tree_exceptional_entry(p))
+- return -EEXIST;
+- if (shadowp)
+- *shadowp = p;
+- mapping->nrshadows--;
+- if (node)
+- workingset_node_shadows_dec(node);
+- }
+- radix_tree_replace_slot(slot, page);
+- mapping->nrpages++;
+- if (node) {
+- workingset_node_pages_inc(node);
+- /*
+- * Don't track node that contains actual pages.
+- *
+- * Avoid acquiring the list_lru lock if already
+- * untracked. The list_empty() test is safe as
+- * node->private_list is protected by
+- * mapping->tree_lock.
+- */
+- if (!list_empty(&node->private_list))
+- list_lru_del(&workingset_shadow_nodes,
+- &node->private_list);
+- }
+- return 0;
+-}
+-
+ static int __add_to_page_cache_locked(struct page *page,
+ struct address_space *mapping,
+ pgoff_t offset, gfp_t gfp_mask,
+diff --git a/mm/workingset.c b/mm/workingset.c
+index aa017133744b..df66f426fdcf 100644
+--- a/mm/workingset.c
++++ b/mm/workingset.c
+@@ -341,21 +341,19 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
+ * no pages, so we expect to be able to remove them all and
+ * delete and free the empty node afterwards.
+ */
+-
+- BUG_ON(!node->count);
+- BUG_ON(node->count & RADIX_TREE_COUNT_MASK);
++ BUG_ON(!workingset_node_shadows(node));
++ BUG_ON(workingset_node_pages(node));
+
+ for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
+ if (node->slots[i]) {
+ BUG_ON(!radix_tree_exceptional_entry(node->slots[i]));
+ node->slots[i] = NULL;
+- BUG_ON(node->count < (1U << RADIX_TREE_COUNT_SHIFT));
+- node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
++ workingset_node_shadows_dec(node);
+ BUG_ON(!mapping->nrshadows);
+ mapping->nrshadows--;
+ }
+ }
+- BUG_ON(node->count);
++ BUG_ON(workingset_node_shadows(node));
+ inc_zone_state(page_zone(virt_to_page(node)), WORKINGSET_NODERECLAIM);
+ if (!__radix_tree_delete_node(&mapping->page_tree, node))
+ BUG();
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 1ba417207465..27b6f55fa43a 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -474,7 +474,16 @@ static int xs_nospace(struct rpc_task *task)
+ spin_unlock_bh(&xprt->transport_lock);
+
+ /* Race breaker in case memory is freed before above code is called */
+- sk->sk_write_space(sk);
++ if (ret == -EAGAIN) {
++ struct socket_wq *wq;
++
++ rcu_read_lock();
++ wq = rcu_dereference(sk->sk_wq);
++ set_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags);
++ rcu_read_unlock();
++
++ sk->sk_write_space(sk);
++ }
+ return ret;
+ }
+
+diff --git a/sound/pci/hda/dell_wmi_helper.c b/sound/pci/hda/dell_wmi_helper.c
+index 9c22f95838ef..19d41da79f93 100644
+--- a/sound/pci/hda/dell_wmi_helper.c
++++ b/sound/pci/hda/dell_wmi_helper.c
+@@ -49,7 +49,7 @@ static void alc_fixup_dell_wmi(struct hda_codec *codec,
+ removefunc = true;
+ if (dell_led_set_func(DELL_LED_MICMUTE, false) >= 0) {
+ dell_led_value = 0;
+- if (spec->gen.num_adc_nids > 1)
++ if (spec->gen.num_adc_nids > 1 && !spec->gen.dyn_adc_switch)
+ codec_dbg(codec, "Skipping micmute LED control due to several ADCs");
+ else {
+ dell_old_cap_hook = spec->gen.cap_sync_hook;
+diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
+index 0a4ad5feb82e..12826ac0381f 100644
+--- a/sound/pci/hda/thinkpad_helper.c
++++ b/sound/pci/hda/thinkpad_helper.c
+@@ -75,7 +75,7 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec,
+ removefunc = false;
+ }
+ if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) {
+- if (spec->num_adc_nids > 1)
++ if (spec->num_adc_nids > 1 && !spec->dyn_adc_switch)
+ codec_dbg(codec,
+ "Skipping micmute LED control due to several ADCs");
+ else {
+diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+index 9409d014b46c..71df7acf8643 100644
+--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+@@ -89,6 +89,7 @@ struct intel_pt_decoder {
+ bool pge;
+ bool have_tma;
+ bool have_cyc;
++ bool fixup_last_mtc;
+ uint64_t pos;
+ uint64_t last_ip;
+ uint64_t ip;
+@@ -584,10 +585,31 @@ struct intel_pt_calc_cyc_to_tsc_info {
+ uint64_t tsc_timestamp;
+ uint64_t timestamp;
+ bool have_tma;
++ bool fixup_last_mtc;
+ bool from_mtc;
+ double cbr_cyc_to_tsc;
+ };
+
++/*
++ * MTC provides an 8-bit slice of CTC but the TMA packet only provides the lower
++ * 16 bits of CTC. If mtc_shift > 8 then some of the MTC bits are not in the CTC
++ * provided by the TMA packet. Fix up the last_mtc calculated from the TMA
++ * packet by copying the missing bits from the current MTC assuming the least
++ * difference between the two, and that the current MTC comes after last_mtc.
++ */
++static void intel_pt_fixup_last_mtc(uint32_t mtc, int mtc_shift,
++ uint32_t *last_mtc)
++{
++ uint32_t first_missing_bit = 1U << (16 - mtc_shift);
++ uint32_t mask = ~(first_missing_bit - 1);
++
++ *last_mtc |= mtc & mask;
++ if (*last_mtc >= mtc) {
++ *last_mtc -= first_missing_bit;
++ *last_mtc &= 0xff;
++ }
++}
++
+ static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info)
+ {
+ struct intel_pt_decoder *decoder = pkt_info->decoder;
+@@ -617,6 +639,11 @@ static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info)
+ return 0;
+
+ mtc = pkt_info->packet.payload;
++ if (decoder->mtc_shift > 8 && data->fixup_last_mtc) {
++ data->fixup_last_mtc = false;
++ intel_pt_fixup_last_mtc(mtc, decoder->mtc_shift,
++ &data->last_mtc);
++ }
+ if (mtc > data->last_mtc)
+ mtc_delta = mtc - data->last_mtc;
+ else
+@@ -685,6 +712,7 @@ static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info)
+
+ data->ctc_delta = 0;
+ data->have_tma = true;
++ data->fixup_last_mtc = true;
+
+ return 0;
+
+@@ -751,6 +779,7 @@ static void intel_pt_calc_cyc_to_tsc(struct intel_pt_decoder *decoder,
+ .tsc_timestamp = decoder->tsc_timestamp,
+ .timestamp = decoder->timestamp,
+ .have_tma = decoder->have_tma,
++ .fixup_last_mtc = decoder->fixup_last_mtc,
+ .from_mtc = from_mtc,
+ .cbr_cyc_to_tsc = 0,
+ };
+@@ -1241,6 +1270,7 @@ static void intel_pt_calc_tma(struct intel_pt_decoder *decoder)
+ }
+ decoder->ctc_delta = 0;
+ decoder->have_tma = true;
++ decoder->fixup_last_mtc = true;
+ intel_pt_log("CTC timestamp " x64_fmt " last MTC %#x CTC rem %#x\n",
+ decoder->ctc_timestamp, decoder->last_mtc, ctc_rem);
+ }
+@@ -1255,6 +1285,12 @@ static void intel_pt_calc_mtc_timestamp(struct intel_pt_decoder *decoder)
+
+ mtc = decoder->packet.payload;
+
++ if (decoder->mtc_shift > 8 && decoder->fixup_last_mtc) {
++ decoder->fixup_last_mtc = false;
++ intel_pt_fixup_last_mtc(mtc, decoder->mtc_shift,
++ &decoder->last_mtc);
++ }
++
+ if (mtc > decoder->last_mtc)
+ mtc_delta = mtc - decoder->last_mtc;
+ else
+@@ -1323,6 +1359,8 @@ static void intel_pt_calc_cyc_timestamp(struct intel_pt_decoder *decoder)
+ timestamp, decoder->timestamp);
+ else
+ decoder->timestamp = timestamp;
++
++ decoder->timestamp_insn_cnt = 0;
+ }
+
+ /* Walk PSB+ packets when already in sync. */
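The fix-up helper added above can be checked in isolation; with mtc_shift assumed to be 10, TMA supplies only the low six MTC bits and the borrow case behaves as the comment describes (the input values are invented for illustration):

	#include <assert.h>
	#include <stdint.h>

	/* intel_pt_fixup_last_mtc() from the hunk above, reproduced verbatim. */
	static void intel_pt_fixup_last_mtc(uint32_t mtc, int mtc_shift,
					    uint32_t *last_mtc)
	{
		uint32_t first_missing_bit = 1U << (16 - mtc_shift);
		uint32_t mask = ~(first_missing_bit - 1);

		*last_mtc |= mtc & mask;
		if (*last_mtc >= mtc) {
			*last_mtc -= first_missing_bit;
			*last_mtc &= 0xff;
		}
	}

	int main(void)
	{
		/* mtc_shift == 10: TMA supplies only the low 6 MTC bits. */
		uint32_t last_mtc = 0x25;	/* high bits unknown, so zero */

		intel_pt_fixup_last_mtc(0x83, 10, &last_mtc);
		/* 0x25 | (0x83 & ~0x3f) = 0xa5 >= 0x83, so borrow: 0xa5 - 0x40 */
		assert(last_mtc == 0x65);	/* low 6 bits still 0x25 */
		return 0;
	}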
+diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
+index 9227c2f076c3..89927b5beebf 100644
+--- a/tools/perf/util/intel-pt.c
++++ b/tools/perf/util/intel-pt.c
+@@ -238,7 +238,7 @@ static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
+ }
+
+ queue = &ptq->pt->queues.queue_array[ptq->queue_nr];
+-
++next:
+ buffer = auxtrace_buffer__next(queue, buffer);
+ if (!buffer) {
+ if (old_buffer)
+@@ -261,9 +261,6 @@ static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
+ intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
+ return -ENOMEM;
+
+- if (old_buffer)
+- auxtrace_buffer__drop_data(old_buffer);
+-
+ if (buffer->use_data) {
+ b->len = buffer->use_size;
+ b->buf = buffer->use_data;
+@@ -273,6 +270,16 @@ static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
+ }
+ b->ref_timestamp = buffer->reference;
+
++ /*
++ * If in snapshot mode and the buffer has no usable data, get next
++ * buffer and again check overlap against old_buffer.
++ */
++ if (ptq->pt->snapshot_mode && !b->len)
++ goto next;
++
++ if (old_buffer)
++ auxtrace_buffer__drop_data(old_buffer);
++
+ if (!old_buffer || ptq->pt->sampling_mode || (ptq->pt->snapshot_mode &&
+ !buffer->consecutive)) {
+ b->consecutive = false;