author     Mike Pagano <mpagano@gentoo.org>  2017-05-20 16:18:59 -0400
committer  Mike Pagano <mpagano@gentoo.org>  2017-05-20 16:18:59 -0400
commit     7204d256da7c380bc6fcc48f4acebc4b1e6fed07 (patch)
tree       3ea7ead03bb7aa4c73f448a898bdaee1205f8fc4
parent     Linux patch 4.10.16 (diff)
download   linux-patches-7204d256da7c380bc6fcc48f4acebc4b1e6fed07.tar.gz
           linux-patches-7204d256da7c380bc6fcc48f4acebc4b1e6fed07.tar.bz2
           linux-patches-7204d256da7c380bc6fcc48f4acebc4b1e6fed07.zip
Linux patch 4.10.17
-rw-r--r--  0000_README              |    4
-rw-r--r--  1016_linux-4.10.17.patch | 3698
2 files changed, 3702 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 6a98163e..f7274569 100644
--- a/0000_README
+++ b/0000_README
@@ -107,6 +107,10 @@ Patch: 1015_linux-4.10.16.patch
From: http://www.kernel.org
Desc: Linux 4.10.16
+Patch: 1016_linux-4.10.17.patch
+From: http://www.kernel.org
+Desc: Linux 4.10.17
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1016_linux-4.10.17.patch b/1016_linux-4.10.17.patch
new file mode 100644
index 00000000..9f329e26
--- /dev/null
+++ b/1016_linux-4.10.17.patch
@@ -0,0 +1,3698 @@
+diff --git a/Makefile b/Makefile
+index e3e60e71fa78..2e8b04bd180e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 10
+-SUBLEVEL = 16
++SUBLEVEL = 17
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+
+diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
+index c2b131527a64..a08d7a93aebb 100644
+--- a/arch/arm/kvm/psci.c
++++ b/arch/arm/kvm/psci.c
+@@ -208,9 +208,10 @@ int kvm_psci_version(struct kvm_vcpu *vcpu)
+
+ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
+ {
+- int ret = 1;
++ struct kvm *kvm = vcpu->kvm;
+ unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
+ unsigned long val;
++ int ret = 1;
+
+ switch (psci_fn) {
+ case PSCI_0_2_FN_PSCI_VERSION:
+@@ -230,7 +231,9 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
+ break;
+ case PSCI_0_2_FN_CPU_ON:
+ case PSCI_0_2_FN64_CPU_ON:
++ mutex_lock(&kvm->lock);
+ val = kvm_psci_vcpu_on(vcpu);
++ mutex_unlock(&kvm->lock);
+ break;
+ case PSCI_0_2_FN_AFFINITY_INFO:
+ case PSCI_0_2_FN64_AFFINITY_INFO:
+@@ -279,6 +282,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
+
+ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
+ {
++ struct kvm *kvm = vcpu->kvm;
+ unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
+ unsigned long val;
+
+@@ -288,7 +292,9 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
+ val = PSCI_RET_SUCCESS;
+ break;
+ case KVM_PSCI_FN_CPU_ON:
++ mutex_lock(&kvm->lock);
+ val = kvm_psci_vcpu_on(vcpu);
++ mutex_unlock(&kvm->lock);
+ break;
+ default:
+ val = PSCI_RET_NOT_SUPPORTED;
+diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
+index f5ea0ba70f07..fe39e6841326 100644
+--- a/arch/arm64/include/asm/kvm_emulate.h
++++ b/arch/arm64/include/asm/kvm_emulate.h
+@@ -240,6 +240,12 @@ static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
+ return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
+ }
+
++static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
++{
++ u32 esr = kvm_vcpu_get_hsr(vcpu);
++ return (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
++}
++
+ static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
+ {
+ return vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index 87e7e6608cd8..7cee552ce0bf 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -1573,8 +1573,8 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
+ {
+ struct sys_reg_params params;
+ u32 hsr = kvm_vcpu_get_hsr(vcpu);
+- int Rt = (hsr >> 5) & 0xf;
+- int Rt2 = (hsr >> 10) & 0xf;
++ int Rt = kvm_vcpu_sys_get_rt(vcpu);
++ int Rt2 = (hsr >> 10) & 0x1f;
+
+ params.is_aarch32 = true;
+ params.is_32bit = false;
+@@ -1625,7 +1625,7 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
+ {
+ struct sys_reg_params params;
+ u32 hsr = kvm_vcpu_get_hsr(vcpu);
+- int Rt = (hsr >> 5) & 0xf;
++ int Rt = kvm_vcpu_sys_get_rt(vcpu);
+
+ params.is_aarch32 = true;
+ params.is_32bit = true;
+@@ -1740,7 +1740,7 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ {
+ struct sys_reg_params params;
+ unsigned long esr = kvm_vcpu_get_hsr(vcpu);
+- int Rt = (esr >> 5) & 0x1f;
++ int Rt = kvm_vcpu_sys_get_rt(vcpu);
+ int ret;
+
+ trace_kvm_handle_sys_reg(esr);
+diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
+index d5e2b8309939..021db31b40ba 100644
+--- a/arch/powerpc/kernel/nvram_64.c
++++ b/arch/powerpc/kernel/nvram_64.c
+@@ -561,6 +561,7 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
+ static struct pstore_info nvram_pstore_info = {
+ .owner = THIS_MODULE,
+ .name = "nvram",
++ .flags = PSTORE_FLAGS_DMESG,
+ .open = nvram_pstore_open,
+ .read = nvram_pstore_read,
+ .write = nvram_pstore_write,
+diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
+index e5612f3e3b57..d7ac721a8a96 100644
+--- a/arch/x86/boot/boot.h
++++ b/arch/x86/boot/boot.h
+@@ -16,7 +16,7 @@
+ #ifndef BOOT_BOOT_H
+ #define BOOT_BOOT_H
+
+-#define STACK_SIZE 512 /* Minimum number of bytes for stack */
++#define STACK_SIZE 1024 /* Minimum number of bytes for stack */
+
+ #ifndef __ASSEMBLY__
+
+diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
+index 22ef4f72cf32..d7176ea4812c 100644
+--- a/arch/x86/events/intel/rapl.c
++++ b/arch/x86/events/intel/rapl.c
+@@ -761,7 +761,7 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
+
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE, hsw_rapl_init),
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, hsw_rapl_init),
+- X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, hsw_rapl_init),
++ X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, hsx_rapl_init),
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsw_rapl_init),
+
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_rapl_init),
+diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
+index 529bb4a6487a..e2904373010d 100644
+--- a/arch/x86/include/asm/pmem.h
++++ b/arch/x86/include/asm/pmem.h
+@@ -103,7 +103,7 @@ static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
+
+ if (bytes < 8) {
+ if (!IS_ALIGNED(dest, 4) || (bytes != 4))
+- arch_wb_cache_pmem(addr, 1);
++ arch_wb_cache_pmem(addr, bytes);
+ } else {
+ if (!IS_ALIGNED(dest, 8)) {
+ dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index b3b212f20f78..a158128494de 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3085,6 +3085,12 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
+ (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
+ return -EINVAL;
+
++ /* INITs are latched while in SMM */
++ if (events->flags & KVM_VCPUEVENT_VALID_SMM &&
++ (events->smi.smm || events->smi.pending) &&
++ vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
++ return -EINVAL;
++
+ process_nmi(vcpu);
+ vcpu->arch.exception.pending = events->exception.injected;
+ vcpu->arch.exception.nr = events->exception.nr;
+@@ -7249,6 +7255,12 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+ mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
+ return -EINVAL;
+
++ /* INITs are latched while in SMM */
++ if ((is_smm(vcpu) || vcpu->arch.smi_pending) &&
++ (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED ||
++ mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED))
++ return -EINVAL;
++
+ if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
+ vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
+ set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
+diff --git a/arch/x86/um/ptrace_64.c b/arch/x86/um/ptrace_64.c
+index a5c9910d234f..09a085bde0d4 100644
+--- a/arch/x86/um/ptrace_64.c
++++ b/arch/x86/um/ptrace_64.c
+@@ -125,7 +125,7 @@ int poke_user(struct task_struct *child, long addr, long data)
+ else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
+ (addr <= offsetof(struct user, u_debugreg[7]))) {
+ addr -= offsetof(struct user, u_debugreg[0]);
+- addr = addr >> 2;
++ addr = addr >> 3;
+ if ((addr == 4) || (addr == 5))
+ return -EIO;
+ child->thread.arch.debugregs[addr] = data;
+diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
+index 7d5afdb417cc..418f1b8576cf 100644
+--- a/arch/x86/xen/mmu.c
++++ b/arch/x86/xen/mmu.c
+@@ -2028,7 +2028,8 @@ static unsigned long __init xen_read_phys_ulong(phys_addr_t addr)
+
+ /*
+ * Translate a virtual address to a physical one without relying on mapped
+- * page tables.
++ * page tables. Don't rely on big pages being aligned in (guest) physical
++ * space!
+ */
+ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
+ {
+@@ -2049,7 +2050,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
+ sizeof(pud)));
+ if (!pud_present(pud))
+ return 0;
+- pa = pud_pfn(pud) << PAGE_SHIFT;
++ pa = pud_val(pud) & PTE_PFN_MASK;
+ if (pud_large(pud))
+ return pa + (vaddr & ~PUD_MASK);
+
+@@ -2057,7 +2058,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
+ sizeof(pmd)));
+ if (!pmd_present(pmd))
+ return 0;
+- pa = pmd_pfn(pmd) << PAGE_SHIFT;
++ pa = pmd_val(pmd) & PTE_PFN_MASK;
+ if (pmd_large(pmd))
+ return pa + (vaddr & ~PMD_MASK);
+
+diff --git a/block/blk-integrity.c b/block/blk-integrity.c
+index 319f2e4f4a8b..478f572cb1e7 100644
+--- a/block/blk-integrity.c
++++ b/block/blk-integrity.c
+@@ -412,7 +412,8 @@ void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template
+
+ bi->flags = BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE |
+ template->flags;
+- bi->interval_exp = ilog2(queue_logical_block_size(disk->queue));
++ bi->interval_exp = template->interval_exp ? :
++ ilog2(queue_logical_block_size(disk->queue));
+ bi->profile = template->profile ? template->profile : &nop_profile;
+ bi->tuple_size = template->tuple_size;
+ bi->tag_size = template->tag_size;
+diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
+index c3177c989dc8..14fd00d6c7f4 100644
+--- a/crypto/algif_aead.c
++++ b/crypto/algif_aead.c
+@@ -44,6 +44,11 @@ struct aead_async_req {
+ char iv[];
+ };
+
++struct aead_tfm {
++ struct crypto_aead *aead;
++ bool has_key;
++};
++
+ struct aead_ctx {
+ struct aead_sg_list tsgl;
+ struct aead_async_rsgl first_rsgl;
+@@ -722,24 +727,146 @@ static struct proto_ops algif_aead_ops = {
+ .poll = aead_poll,
+ };
+
++static int aead_check_key(struct socket *sock)
++{
++ int err = 0;
++ struct sock *psk;
++ struct alg_sock *pask;
++ struct aead_tfm *tfm;
++ struct sock *sk = sock->sk;
++ struct alg_sock *ask = alg_sk(sk);
++
++ lock_sock(sk);
++ if (ask->refcnt)
++ goto unlock_child;
++
++ psk = ask->parent;
++ pask = alg_sk(ask->parent);
++ tfm = pask->private;
++
++ err = -ENOKEY;
++ lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
++ if (!tfm->has_key)
++ goto unlock;
++
++ if (!pask->refcnt++)
++ sock_hold(psk);
++
++ ask->refcnt = 1;
++ sock_put(psk);
++
++ err = 0;
++
++unlock:
++ release_sock(psk);
++unlock_child:
++ release_sock(sk);
++
++ return err;
++}
++
++static int aead_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
++ size_t size)
++{
++ int err;
++
++ err = aead_check_key(sock);
++ if (err)
++ return err;
++
++ return aead_sendmsg(sock, msg, size);
++}
++
++static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page,
++ int offset, size_t size, int flags)
++{
++ int err;
++
++ err = aead_check_key(sock);
++ if (err)
++ return err;
++
++ return aead_sendpage(sock, page, offset, size, flags);
++}
++
++static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
++ size_t ignored, int flags)
++{
++ int err;
++
++ err = aead_check_key(sock);
++ if (err)
++ return err;
++
++ return aead_recvmsg(sock, msg, ignored, flags);
++}
++
++static struct proto_ops algif_aead_ops_nokey = {
++ .family = PF_ALG,
++
++ .connect = sock_no_connect,
++ .socketpair = sock_no_socketpair,
++ .getname = sock_no_getname,
++ .ioctl = sock_no_ioctl,
++ .listen = sock_no_listen,
++ .shutdown = sock_no_shutdown,
++ .getsockopt = sock_no_getsockopt,
++ .mmap = sock_no_mmap,
++ .bind = sock_no_bind,
++ .accept = sock_no_accept,
++ .setsockopt = sock_no_setsockopt,
++
++ .release = af_alg_release,
++ .sendmsg = aead_sendmsg_nokey,
++ .sendpage = aead_sendpage_nokey,
++ .recvmsg = aead_recvmsg_nokey,
++ .poll = aead_poll,
++};
++
+ static void *aead_bind(const char *name, u32 type, u32 mask)
+ {
+- return crypto_alloc_aead(name, type, mask);
++ struct aead_tfm *tfm;
++ struct crypto_aead *aead;
++
++ tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
++ if (!tfm)
++ return ERR_PTR(-ENOMEM);
++
++ aead = crypto_alloc_aead(name, type, mask);
++ if (IS_ERR(aead)) {
++ kfree(tfm);
++ return ERR_CAST(aead);
++ }
++
++ tfm->aead = aead;
++
++ return tfm;
+ }
+
+ static void aead_release(void *private)
+ {
+- crypto_free_aead(private);
++ struct aead_tfm *tfm = private;
++
++ crypto_free_aead(tfm->aead);
++ kfree(tfm);
+ }
+
+ static int aead_setauthsize(void *private, unsigned int authsize)
+ {
+- return crypto_aead_setauthsize(private, authsize);
++ struct aead_tfm *tfm = private;
++
++ return crypto_aead_setauthsize(tfm->aead, authsize);
+ }
+
+ static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
+ {
+- return crypto_aead_setkey(private, key, keylen);
++ struct aead_tfm *tfm = private;
++ int err;
++
++ err = crypto_aead_setkey(tfm->aead, key, keylen);
++ tfm->has_key = !err;
++
++ return err;
+ }
+
+ static void aead_sock_destruct(struct sock *sk)
+@@ -756,12 +883,14 @@ static void aead_sock_destruct(struct sock *sk)
+ af_alg_release_parent(sk);
+ }
+
+-static int aead_accept_parent(void *private, struct sock *sk)
++static int aead_accept_parent_nokey(void *private, struct sock *sk)
+ {
+ struct aead_ctx *ctx;
+ struct alg_sock *ask = alg_sk(sk);
+- unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private);
+- unsigned int ivlen = crypto_aead_ivsize(private);
++ struct aead_tfm *tfm = private;
++ struct crypto_aead *aead = tfm->aead;
++ unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(aead);
++ unsigned int ivlen = crypto_aead_ivsize(aead);
+
+ ctx = sock_kmalloc(sk, len, GFP_KERNEL);
+ if (!ctx)
+@@ -788,7 +917,7 @@ static int aead_accept_parent(void *private, struct sock *sk)
+
+ ask->private = ctx;
+
+- aead_request_set_tfm(&ctx->aead_req, private);
++ aead_request_set_tfm(&ctx->aead_req, aead);
+ aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ af_alg_complete, &ctx->completion);
+
+@@ -797,13 +926,25 @@ static int aead_accept_parent(void *private, struct sock *sk)
+ return 0;
+ }
+
++static int aead_accept_parent(void *private, struct sock *sk)
++{
++ struct aead_tfm *tfm = private;
++
++ if (!tfm->has_key)
++ return -ENOKEY;
++
++ return aead_accept_parent_nokey(private, sk);
++}
++
+ static const struct af_alg_type algif_type_aead = {
+ .bind = aead_bind,
+ .release = aead_release,
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .accept = aead_accept_parent,
++ .accept_nokey = aead_accept_parent_nokey,
+ .ops = &algif_aead_ops,
++ .ops_nokey = &algif_aead_ops_nokey,
+ .name = "aead",
+ .owner = THIS_MODULE
+ };
+diff --git a/drivers/Makefile b/drivers/Makefile
+index 060026a02f59..c409c66710ac 100644
+--- a/drivers/Makefile
++++ b/drivers/Makefile
+@@ -101,6 +101,7 @@ obj-$(CONFIG_USB_PHY) += usb/
+ obj-$(CONFIG_USB) += usb/
+ obj-$(CONFIG_PCI) += usb/
+ obj-$(CONFIG_USB_GADGET) += usb/
++obj-$(CONFIG_OF) += usb/
+ obj-$(CONFIG_SERIO) += input/serio/
+ obj-$(CONFIG_GAMEPORT) += input/gameport/
+ obj-$(CONFIG_INPUT) += input/
+diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
+index 8f6c23c20c52..deed58013555 100644
+--- a/drivers/bluetooth/hci_bcm.c
++++ b/drivers/bluetooth/hci_bcm.c
+@@ -287,6 +287,9 @@ static int bcm_open(struct hci_uart *hu)
+
+ hu->priv = bcm;
+
++ if (!hu->tty->dev)
++ goto out;
++
+ mutex_lock(&bcm_device_lock);
+ list_for_each(p, &bcm_device_list) {
+ struct bcm_device *dev = list_entry(p, struct bcm_device, list);
+@@ -307,7 +310,7 @@ static int bcm_open(struct hci_uart *hu)
+ }
+
+ mutex_unlock(&bcm_device_lock);
+-
++out:
+ return 0;
+ }
+
+diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
+index 9e271286c5e5..73306384af6c 100644
+--- a/drivers/bluetooth/hci_intel.c
++++ b/drivers/bluetooth/hci_intel.c
+@@ -307,6 +307,9 @@ static int intel_set_power(struct hci_uart *hu, bool powered)
+ struct list_head *p;
+ int err = -ENODEV;
+
++ if (!hu->tty->dev)
++ return err;
++
+ mutex_lock(&intel_device_list_lock);
+
+ list_for_each(p, &intel_device_list) {
+@@ -379,6 +382,9 @@ static void intel_busy_work(struct work_struct *work)
+ struct intel_data *intel = container_of(work, struct intel_data,
+ busy_work);
+
++ if (!intel->hu->tty->dev)
++ return;
++
+ /* Link is busy, delay the suspend */
+ mutex_lock(&intel_device_list_lock);
+ list_for_each(p, &intel_device_list) {
+@@ -889,6 +895,8 @@ static int intel_setup(struct hci_uart *hu)
+ list_for_each(p, &intel_device_list) {
+ struct intel_device *dev = list_entry(p, struct intel_device,
+ list);
++ if (!hu->tty->dev)
++ break;
+ if (hu->tty->dev->parent == dev->pdev->dev.parent) {
+ if (device_may_wakeup(&dev->pdev->dev)) {
+ set_bit(STATE_LPM_ENABLED, &intel->flags);
+@@ -1056,6 +1064,9 @@ static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+
+ BT_DBG("hu %p skb %p", hu, skb);
+
++ if (!hu->tty->dev)
++ goto out_enqueue;
++
+ /* Be sure our controller is resumed and potential LPM transaction
+ * completed before enqueuing any packet.
+ */
+@@ -1072,7 +1083,7 @@ static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+ }
+ }
+ mutex_unlock(&intel_device_list_lock);
+-
++out_enqueue:
+ skb_queue_tail(&intel->txq, skb);
+
+ return 0;
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index cca6e5bc1cea..51ba67de862e 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -891,6 +891,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
+ * for details on the intricacies of this.
+ */
+ int left;
++ unsigned char *data_to_send;
+
+ ssif_inc_stat(ssif_info, sent_messages_parts);
+
+@@ -899,6 +900,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
+ left = 32;
+ /* Length byte. */
+ ssif_info->multi_data[ssif_info->multi_pos] = left;
++ data_to_send = ssif_info->multi_data + ssif_info->multi_pos;
+ ssif_info->multi_pos += left;
+ if (left < 32)
+ /*
+@@ -912,7 +914,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
+ rv = ssif_i2c_send(ssif_info, msg_written_handler,
+ I2C_SMBUS_WRITE,
+ SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE,
+- ssif_info->multi_data + ssif_info->multi_pos,
++ data_to_send,
+ I2C_SMBUS_BLOCK_DATA);
+ if (rv < 0) {
+ /* request failed, just return the error. */
+diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
+index 7bc09989e18a..c46eeda71595 100644
+--- a/drivers/crypto/ccp/ccp-dev-v3.c
++++ b/drivers/crypto/ccp/ccp-dev-v3.c
+@@ -315,17 +315,73 @@ static int ccp_perform_ecc(struct ccp_op *op)
+ return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+ }
+
++static void ccp_disable_queue_interrupts(struct ccp_device *ccp)
++{
++ iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
++}
++
++static void ccp_enable_queue_interrupts(struct ccp_device *ccp)
++{
++ iowrite32(ccp->qim, ccp->io_regs + IRQ_MASK_REG);
++}
++
++static void ccp_irq_bh(unsigned long data)
++{
++ struct ccp_device *ccp = (struct ccp_device *)data;
++ struct ccp_cmd_queue *cmd_q;
++ u32 q_int, status;
++ unsigned int i;
++
++ status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
++
++ for (i = 0; i < ccp->cmd_q_count; i++) {
++ cmd_q = &ccp->cmd_q[i];
++
++ q_int = status & (cmd_q->int_ok | cmd_q->int_err);
++ if (q_int) {
++ cmd_q->int_status = status;
++ cmd_q->q_status = ioread32(cmd_q->reg_status);
++ cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
++
++ /* On error, only save the first error value */
++ if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
++ cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
++
++ cmd_q->int_rcvd = 1;
++
++ /* Acknowledge the interrupt and wake the kthread */
++ iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
++ wake_up_interruptible(&cmd_q->int_queue);
++ }
++ }
++ ccp_enable_queue_interrupts(ccp);
++}
++
++static irqreturn_t ccp_irq_handler(int irq, void *data)
++{
++ struct device *dev = data;
++ struct ccp_device *ccp = dev_get_drvdata(dev);
++
++ ccp_disable_queue_interrupts(ccp);
++ if (ccp->use_tasklet)
++ tasklet_schedule(&ccp->irq_tasklet);
++ else
++ ccp_irq_bh((unsigned long)ccp);
++
++ return IRQ_HANDLED;
++}
++
+ static int ccp_init(struct ccp_device *ccp)
+ {
+ struct device *dev = ccp->dev;
+ struct ccp_cmd_queue *cmd_q;
+ struct dma_pool *dma_pool;
+ char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
+- unsigned int qmr, qim, i;
++ unsigned int qmr, i;
+ int ret;
+
+ /* Find available queues */
+- qim = 0;
++ ccp->qim = 0;
+ qmr = ioread32(ccp->io_regs + Q_MASK_REG);
+ for (i = 0; i < MAX_HW_QUEUES; i++) {
+ if (!(qmr & (1 << i)))
+@@ -370,7 +426,7 @@ static int ccp_init(struct ccp_device *ccp)
+ init_waitqueue_head(&cmd_q->int_queue);
+
+ /* Build queue interrupt mask (two interrupts per queue) */
+- qim |= cmd_q->int_ok | cmd_q->int_err;
++ ccp->qim |= cmd_q->int_ok | cmd_q->int_err;
+
+ #ifdef CONFIG_ARM64
+ /* For arm64 set the recommended queue cache settings */
+@@ -388,14 +444,14 @@ static int ccp_init(struct ccp_device *ccp)
+ dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
+
+ /* Disable and clear interrupts until ready */
+- iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
++ ccp_disable_queue_interrupts(ccp);
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ cmd_q = &ccp->cmd_q[i];
+
+ ioread32(cmd_q->reg_int_status);
+ ioread32(cmd_q->reg_status);
+ }
+- iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
++ iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
+
+ /* Request an irq */
+ ret = ccp->get_irq(ccp);
+@@ -404,6 +460,11 @@ static int ccp_init(struct ccp_device *ccp)
+ goto e_pool;
+ }
+
++ /* Initialize the ISR tasklet? */
++ if (ccp->use_tasklet)
++ tasklet_init(&ccp->irq_tasklet, ccp_irq_bh,
++ (unsigned long)ccp);
++
+ dev_dbg(dev, "Starting threads...\n");
+ /* Create a kthread for each queue */
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+@@ -426,7 +487,7 @@ static int ccp_init(struct ccp_device *ccp)
+
+ dev_dbg(dev, "Enabling interrupts...\n");
+ /* Enable interrupts */
+- iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
++ ccp_enable_queue_interrupts(ccp);
+
+ dev_dbg(dev, "Registering device...\n");
+ ccp_add_device(ccp);
+@@ -463,7 +524,7 @@ static void ccp_destroy(struct ccp_device *ccp)
+ {
+ struct ccp_cmd_queue *cmd_q;
+ struct ccp_cmd *cmd;
+- unsigned int qim, i;
++ unsigned int i;
+
+ /* Unregister the DMA engine */
+ ccp_dmaengine_unregister(ccp);
+@@ -474,22 +535,15 @@ static void ccp_destroy(struct ccp_device *ccp)
+ /* Remove this device from the list of available units */
+ ccp_del_device(ccp);
+
+- /* Build queue interrupt mask (two interrupt masks per queue) */
+- qim = 0;
+- for (i = 0; i < ccp->cmd_q_count; i++) {
+- cmd_q = &ccp->cmd_q[i];
+- qim |= cmd_q->int_ok | cmd_q->int_err;
+- }
+-
+ /* Disable and clear interrupts */
+- iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
++ ccp_disable_queue_interrupts(ccp);
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ cmd_q = &ccp->cmd_q[i];
+
+ ioread32(cmd_q->reg_int_status);
+ ioread32(cmd_q->reg_status);
+ }
+- iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
++ iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
+
+ /* Stop the queue kthreads */
+ for (i = 0; i < ccp->cmd_q_count; i++)
+@@ -516,40 +570,6 @@ static void ccp_destroy(struct ccp_device *ccp)
+ }
+ }
+
+-static irqreturn_t ccp_irq_handler(int irq, void *data)
+-{
+- struct device *dev = data;
+- struct ccp_device *ccp = dev_get_drvdata(dev);
+- struct ccp_cmd_queue *cmd_q;
+- u32 q_int, status;
+- unsigned int i;
+-
+- status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
+-
+- for (i = 0; i < ccp->cmd_q_count; i++) {
+- cmd_q = &ccp->cmd_q[i];
+-
+- q_int = status & (cmd_q->int_ok | cmd_q->int_err);
+- if (q_int) {
+- cmd_q->int_status = status;
+- cmd_q->q_status = ioread32(cmd_q->reg_status);
+- cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
+-
+- /* On error, only save the first error value */
+- if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
+- cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
+-
+- cmd_q->int_rcvd = 1;
+-
+- /* Acknowledge the interrupt and wake the kthread */
+- iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
+- wake_up_interruptible(&cmd_q->int_queue);
+- }
+- }
+-
+- return IRQ_HANDLED;
+-}
+-
+ static const struct ccp_actions ccp3_actions = {
+ .aes = ccp_perform_aes,
+ .xts_aes = ccp_perform_xts_aes,
+diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
+index 3422f203455d..3c09904705e2 100644
+--- a/drivers/crypto/ccp/ccp-dev-v5.c
++++ b/drivers/crypto/ccp/ccp-dev-v5.c
+@@ -651,6 +651,65 @@ static int ccp_assign_lsbs(struct ccp_device *ccp)
+ return rc;
+ }
+
++static void ccp5_disable_queue_interrupts(struct ccp_device *ccp)
++{
++ unsigned int i;
++
++ for (i = 0; i < ccp->cmd_q_count; i++)
++ iowrite32(0x0, ccp->cmd_q[i].reg_int_enable);
++}
++
++static void ccp5_enable_queue_interrupts(struct ccp_device *ccp)
++{
++ unsigned int i;
++
++ for (i = 0; i < ccp->cmd_q_count; i++)
++ iowrite32(SUPPORTED_INTERRUPTS, ccp->cmd_q[i].reg_int_enable);
++}
++
++static void ccp5_irq_bh(unsigned long data)
++{
++ struct ccp_device *ccp = (struct ccp_device *)data;
++ u32 status;
++ unsigned int i;
++
++ for (i = 0; i < ccp->cmd_q_count; i++) {
++ struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
++
++ status = ioread32(cmd_q->reg_interrupt_status);
++
++ if (status) {
++ cmd_q->int_status = status;
++ cmd_q->q_status = ioread32(cmd_q->reg_status);
++ cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
++
++ /* On error, only save the first error value */
++ if ((status & INT_ERROR) && !cmd_q->cmd_error)
++ cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
++
++ cmd_q->int_rcvd = 1;
++
++ /* Acknowledge the interrupt and wake the kthread */
++ iowrite32(status, cmd_q->reg_interrupt_status);
++ wake_up_interruptible(&cmd_q->int_queue);
++ }
++ }
++ ccp5_enable_queue_interrupts(ccp);
++}
++
++static irqreturn_t ccp5_irq_handler(int irq, void *data)
++{
++ struct device *dev = data;
++ struct ccp_device *ccp = dev_get_drvdata(dev);
++
++ ccp5_disable_queue_interrupts(ccp);
++ if (ccp->use_tasklet)
++ tasklet_schedule(&ccp->irq_tasklet);
++ else
++ ccp5_irq_bh((unsigned long)ccp);
++ return IRQ_HANDLED;
++}
++
+ static int ccp5_init(struct ccp_device *ccp)
+ {
+ struct device *dev = ccp->dev;
+@@ -735,19 +794,18 @@ static int ccp5_init(struct ccp_device *ccp)
+ dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
+
+ /* Turn off the queues and disable interrupts until ready */
++ ccp5_disable_queue_interrupts(ccp);
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ cmd_q = &ccp->cmd_q[i];
+
+ cmd_q->qcontrol = 0; /* Start with nothing */
+ iowrite32(cmd_q->qcontrol, cmd_q->reg_control);
+
+- /* Disable the interrupts */
+- iowrite32(0x00, cmd_q->reg_int_enable);
+ ioread32(cmd_q->reg_int_status);
+ ioread32(cmd_q->reg_status);
+
+- /* Clear the interrupts */
+- iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
++ /* Clear the interrupt status */
++ iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
+ }
+
+ dev_dbg(dev, "Requesting an IRQ...\n");
+@@ -757,6 +815,10 @@ static int ccp5_init(struct ccp_device *ccp)
+ dev_err(dev, "unable to allocate an IRQ\n");
+ goto e_pool;
+ }
++ /* Initialize the ISR tasklet */
++ if (ccp->use_tasklet)
++ tasklet_init(&ccp->irq_tasklet, ccp5_irq_bh,
++ (unsigned long)ccp);
+
+ dev_dbg(dev, "Loading LSB map...\n");
+ /* Copy the private LSB mask to the public registers */
+@@ -825,11 +887,7 @@ static int ccp5_init(struct ccp_device *ccp)
+ }
+
+ dev_dbg(dev, "Enabling interrupts...\n");
+- /* Enable interrupts */
+- for (i = 0; i < ccp->cmd_q_count; i++) {
+- cmd_q = &ccp->cmd_q[i];
+- iowrite32(ALL_INTERRUPTS, cmd_q->reg_int_enable);
+- }
++ ccp5_enable_queue_interrupts(ccp);
+
+ dev_dbg(dev, "Registering device...\n");
+ /* Put this on the unit list to make it available */
+@@ -881,17 +939,15 @@ static void ccp5_destroy(struct ccp_device *ccp)
+ ccp_del_device(ccp);
+
+ /* Disable and clear interrupts */
++ ccp5_disable_queue_interrupts(ccp);
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ cmd_q = &ccp->cmd_q[i];
+
+ /* Turn off the run bit */
+ iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control);
+
+- /* Disable the interrupts */
+- iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
+-
+ /* Clear the interrupt status */
+- iowrite32(0x00, cmd_q->reg_int_enable);
++ iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
+ ioread32(cmd_q->reg_int_status);
+ ioread32(cmd_q->reg_status);
+ }
+@@ -924,38 +980,6 @@ static void ccp5_destroy(struct ccp_device *ccp)
+ }
+ }
+
+-static irqreturn_t ccp5_irq_handler(int irq, void *data)
+-{
+- struct device *dev = data;
+- struct ccp_device *ccp = dev_get_drvdata(dev);
+- u32 status;
+- unsigned int i;
+-
+- for (i = 0; i < ccp->cmd_q_count; i++) {
+- struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
+-
+- status = ioread32(cmd_q->reg_interrupt_status);
+-
+- if (status) {
+- cmd_q->int_status = status;
+- cmd_q->q_status = ioread32(cmd_q->reg_status);
+- cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
+-
+- /* On error, only save the first error value */
+- if ((status & INT_ERROR) && !cmd_q->cmd_error)
+- cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
+-
+- cmd_q->int_rcvd = 1;
+-
+- /* Acknowledge the interrupt and wake the kthread */
+- iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
+- wake_up_interruptible(&cmd_q->int_queue);
+- }
+- }
+-
+- return IRQ_HANDLED;
+-}
+-
+ static void ccp5_config(struct ccp_device *ccp)
+ {
+ /* Public side */
+diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
+index cd9a7051da3c..20aa30f272b6 100644
+--- a/drivers/crypto/ccp/ccp-dev.h
++++ b/drivers/crypto/ccp/ccp-dev.h
+@@ -109,9 +109,8 @@
+ #define INT_COMPLETION 0x1
+ #define INT_ERROR 0x2
+ #define INT_QUEUE_STOPPED 0x4
+-#define ALL_INTERRUPTS (INT_COMPLETION| \
+- INT_ERROR| \
+- INT_QUEUE_STOPPED)
++#define INT_EMPTY_QUEUE 0x8
++#define SUPPORTED_INTERRUPTS (INT_COMPLETION | INT_ERROR)
+
+ #define LSB_REGION_WIDTH 5
+ #define MAX_LSB_CNT 8
+@@ -337,7 +336,10 @@ struct ccp_device {
+ void *dev_specific;
+ int (*get_irq)(struct ccp_device *ccp);
+ void (*free_irq)(struct ccp_device *ccp);
++ unsigned int qim;
+ unsigned int irq;
++ bool use_tasklet;
++ struct tasklet_struct irq_tasklet;
+
+ /* I/O area used for device communication. The register mapping
+ * starts at an offset into the mapped bar.
+diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
+index 28a9996c1085..e880d4cf4ada 100644
+--- a/drivers/crypto/ccp/ccp-pci.c
++++ b/drivers/crypto/ccp/ccp-pci.c
+@@ -69,6 +69,7 @@ static int ccp_get_msix_irqs(struct ccp_device *ccp)
+ goto e_irq;
+ }
+ }
++ ccp->use_tasklet = true;
+
+ return 0;
+
+@@ -100,6 +101,7 @@ static int ccp_get_msi_irq(struct ccp_device *ccp)
+ dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
+ goto e_msi;
+ }
++ ccp->use_tasklet = true;
+
+ return 0;
+
+diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
+index 4721d50c4628..cc8f89a86ca2 100644
+--- a/drivers/crypto/s5p-sss.c
++++ b/drivers/crypto/s5p-sss.c
+@@ -287,7 +287,6 @@ static void s5p_sg_done(struct s5p_aes_dev *dev)
+ static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
+ {
+ dev->req->base.complete(&dev->req->base, err);
+- dev->busy = false;
+ }
+
+ static void s5p_unset_outdata(struct s5p_aes_dev *dev)
+@@ -462,7 +461,7 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ s5p_aes_complete(dev, 0);
+- dev->busy = true;
++ /* Device is still busy */
+ tasklet_schedule(&dev->tasklet);
+ } else {
+ /*
+@@ -483,6 +482,7 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
+
+ error:
+ s5p_sg_done(dev);
++ dev->busy = false;
+ spin_unlock_irqrestore(&dev->lock, flags);
+ s5p_aes_complete(dev, err);
+
+@@ -634,6 +634,7 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
+
+ indata_error:
+ s5p_sg_done(dev);
++ dev->busy = false;
+ spin_unlock_irqrestore(&dev->lock, flags);
+ s5p_aes_complete(dev, err);
+ }
+diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
+index 53a016c3dffa..82e42867a21f 100644
+--- a/drivers/dax/dax.c
++++ b/drivers/dax/dax.c
+@@ -76,36 +76,27 @@ struct dax_dev {
+ struct resource res[0];
+ };
+
++/*
++ * Rely on the fact that drvdata is set before the attributes are
++ * registered, and that the attributes are unregistered before drvdata
++ * is cleared to assume that drvdata is always valid.
++ */
+ static ssize_t id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+ {
+- struct dax_region *dax_region;
+- ssize_t rc = -ENXIO;
++ struct dax_region *dax_region = dev_get_drvdata(dev);
+
+- device_lock(dev);
+- dax_region = dev_get_drvdata(dev);
+- if (dax_region)
+- rc = sprintf(buf, "%d\n", dax_region->id);
+- device_unlock(dev);
+-
+- return rc;
++ return sprintf(buf, "%d\n", dax_region->id);
+ }
+ static DEVICE_ATTR_RO(id);
+
+ static ssize_t region_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+ {
+- struct dax_region *dax_region;
+- ssize_t rc = -ENXIO;
++ struct dax_region *dax_region = dev_get_drvdata(dev);
+
+- device_lock(dev);
+- dax_region = dev_get_drvdata(dev);
+- if (dax_region)
+- rc = sprintf(buf, "%llu\n", (unsigned long long)
+- resource_size(&dax_region->res));
+- device_unlock(dev);
+-
+- return rc;
++ return sprintf(buf, "%llu\n", (unsigned long long)
++ resource_size(&dax_region->res));
+ }
+ static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
+ region_size_show, NULL);
+@@ -113,16 +104,9 @@ static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
+ static ssize_t align_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+ {
+- struct dax_region *dax_region;
+- ssize_t rc = -ENXIO;
++ struct dax_region *dax_region = dev_get_drvdata(dev);
+
+- device_lock(dev);
+- dax_region = dev_get_drvdata(dev);
+- if (dax_region)
+- rc = sprintf(buf, "%u\n", dax_region->align);
+- device_unlock(dev);
+-
+- return rc;
++ return sprintf(buf, "%u\n", dax_region->align);
+ }
+ static DEVICE_ATTR_RO(align);
+
+@@ -646,13 +630,10 @@ static void dax_dev_release(struct device *dev)
+ kfree(dax_dev);
+ }
+
+-static void unregister_dax_dev(void *dev)
++static void kill_dax_dev(struct dax_dev *dax_dev)
+ {
+- struct dax_dev *dax_dev = to_dax_dev(dev);
+ struct cdev *cdev = &dax_dev->cdev;
+
+- dev_dbg(dev, "%s\n", __func__);
+-
+ /*
+ * Note, rcu is not protecting the liveness of dax_dev, rcu is
+ * ensuring that any fault handlers that might have seen
+@@ -664,6 +645,15 @@ static void unregister_dax_dev(void *dev)
+ synchronize_srcu(&dax_srcu);
+ unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1);
+ cdev_del(cdev);
++}
++
++static void unregister_dax_dev(void *dev)
++{
++ struct dax_dev *dax_dev = to_dax_dev(dev);
++
++ dev_dbg(dev, "%s\n", __func__);
++
++ kill_dax_dev(dax_dev);
+ device_unregister(dev);
+ }
+
+@@ -740,6 +730,7 @@ struct dax_dev *devm_create_dax_dev(struct dax_region *dax_region,
+ dev_set_name(dev, "dax%d.%d", dax_region->id, dax_dev->id);
+ rc = device_add(dev);
+ if (rc) {
++ kill_dax_dev(dax_dev);
+ put_device(dev);
+ return ERR_PTR(rc);
+ }
+diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
+index c1fb545e8d78..42de5f22da93 100644
+--- a/drivers/infiniband/core/sysfs.c
++++ b/drivers/infiniband/core/sysfs.c
+@@ -1301,7 +1301,7 @@ int ib_device_register_sysfs(struct ib_device *device,
+ free_port_list_attributes(device);
+
+ err_unregister:
+- device_unregister(class_dev);
++ device_del(class_dev);
+
+ err:
+ return ret;
+diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
+index 71580cc28c9e..5f76caede5ab 100644
+--- a/drivers/infiniband/core/verbs.c
++++ b/drivers/infiniband/core/verbs.c
+@@ -1520,7 +1520,9 @@ int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
+
+ if (!qp->device->attach_mcast)
+ return -ENOSYS;
+- if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
++ if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD ||
++ lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
++ lid == be16_to_cpu(IB_LID_PERMISSIVE))
+ return -EINVAL;
+
+ ret = qp->device->attach_mcast(qp, gid, lid);
+@@ -1536,7 +1538,9 @@ int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
+
+ if (!qp->device->detach_mcast)
+ return -ENOSYS;
+- if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
++ if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD ||
++ lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
++ lid == be16_to_cpu(IB_LID_PERMISSIVE))
+ return -EINVAL;
+
+ ret = qp->device->detach_mcast(qp, gid, lid);
+diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
+index 717ed4b159d3..6e46a6c002a7 100644
+--- a/drivers/infiniband/hw/hfi1/ruc.c
++++ b/drivers/infiniband/hw/hfi1/ruc.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright(c) 2015, 2016 Intel Corporation.
++ * Copyright(c) 2015 - 2017 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+@@ -823,23 +823,29 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
+ /* when sending, force a reschedule every one of these periods */
+ #define SEND_RESCHED_TIMEOUT (5 * HZ) /* 5s in jiffies */
+
++void hfi1_do_send_from_rvt(struct rvt_qp *qp)
++{
++ hfi1_do_send(qp, false);
++}
++
+ void _hfi1_do_send(struct work_struct *work)
+ {
+ struct iowait *wait = container_of(work, struct iowait, iowork);
+ struct rvt_qp *qp = iowait_to_qp(wait);
+
+- hfi1_do_send(qp);
++ hfi1_do_send(qp, true);
+ }
+
+ /**
+ * hfi1_do_send - perform a send on a QP
+ * @work: contains a pointer to the QP
++ * @in_thread: true if in a workqueue thread
+ *
+ * Process entries in the send work queue until credit or queue is
+ * exhausted. Only allow one CPU to send a packet per QP.
+ * Otherwise, two threads could send packets out of order.
+ */
+-void hfi1_do_send(struct rvt_qp *qp)
++void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
+ {
+ struct hfi1_pkt_state ps;
+ struct hfi1_qp_priv *priv = qp->priv;
+@@ -907,8 +913,10 @@ void hfi1_do_send(struct rvt_qp *qp)
+ qp->s_hdrwords = 0;
+ /* allow other tasks to run */
+ if (unlikely(time_after(jiffies, timeout))) {
+- if (workqueue_congested(cpu,
+- ps.ppd->hfi1_wq)) {
++ if (!in_thread ||
++ workqueue_congested(
++ cpu,
++ ps.ppd->hfi1_wq)) {
+ spin_lock_irqsave(
+ &qp->s_lock,
+ ps.flags);
+@@ -921,11 +929,9 @@ void hfi1_do_send(struct rvt_qp *qp)
+ *ps.ppd->dd->send_schedule);
+ return;
+ }
+- if (!irqs_disabled()) {
+- cond_resched();
+- this_cpu_inc(
+- *ps.ppd->dd->send_schedule);
+- }
++ cond_resched();
++ this_cpu_inc(
++ *ps.ppd->dd->send_schedule);
+ timeout = jiffies + (timeout_int) / 8;
+ }
+ spin_lock_irqsave(&qp->s_lock, ps.flags);
+diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
+index 95ed4d6da510..6002aa9317ef 100644
+--- a/drivers/infiniband/hw/hfi1/verbs.c
++++ b/drivers/infiniband/hw/hfi1/verbs.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright(c) 2015, 2016 Intel Corporation.
++ * Copyright(c) 2015 - 2017 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+@@ -1832,7 +1832,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
+ dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free;
+ dd->verbs_dev.rdi.driver_f.free_all_qps = free_all_qps;
+ dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset;
+- dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send;
++ dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send_from_rvt;
+ dd->verbs_dev.rdi.driver_f.schedule_send = hfi1_schedule_send;
+ dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _hfi1_schedule_send;
+ dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = get_pmtu_from_attr;
+diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
+index e6b893010e6d..228dc5545159 100644
+--- a/drivers/infiniband/hw/hfi1/verbs.h
++++ b/drivers/infiniband/hw/hfi1/verbs.h
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright(c) 2015, 2016 Intel Corporation.
++ * Copyright(c) 2015 - 2017 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+@@ -370,7 +370,9 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
+
+ void _hfi1_do_send(struct work_struct *work);
+
+-void hfi1_do_send(struct rvt_qp *qp);
++void hfi1_do_send_from_rvt(struct rvt_qp *qp);
++
++void hfi1_do_send(struct rvt_qp *qp, bool in_thread);
+
+ void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ enum ib_wc_status status);
+diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
+index 7031a8dd4d14..71a99b9daa52 100644
+--- a/drivers/infiniband/hw/mlx4/main.c
++++ b/drivers/infiniband/hw/mlx4/main.c
+@@ -2935,6 +2935,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
+ mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
+
+ err_map:
++ mlx4_ib_free_eqs(dev, ibdev);
+ iounmap(ibdev->uar_map);
+
+ err_uar:
+diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
+index e010fe459e67..8772d88d324d 100644
+--- a/drivers/infiniband/hw/mlx4/mcg.c
++++ b/drivers/infiniband/hw/mlx4/mcg.c
+@@ -1102,7 +1102,8 @@ static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy
+ while ((p = rb_first(&ctx->mcg_table)) != NULL) {
+ group = rb_entry(p, struct mcast_group, node);
+ if (atomic_read(&group->refcount))
+- mcg_warn_group(group, "group refcount %d!!! (pointer %p)\n", atomic_read(&group->refcount), group);
++ mcg_debug_group(group, "group refcount %d!!! (pointer %p)\n",
++ atomic_read(&group->refcount), group);
+
+ force_clean_group(group);
+ }
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+index 6bd5740e2691..09396bd7b02d 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+@@ -281,8 +281,11 @@ void ipoib_delete_debug_files(struct net_device *dev)
+ {
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+
++ WARN_ONCE(!priv->mcg_dentry, "null mcg debug file\n");
++ WARN_ONCE(!priv->path_dentry, "null path debug file\n");
+ debugfs_remove(priv->mcg_dentry);
+ debugfs_remove(priv->path_dentry);
++ priv->mcg_dentry = priv->path_dentry = NULL;
+ }
+
+ int ipoib_register_debugfs(void)
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+index 4584c03bc355..d5931375d7a6 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -108,6 +108,33 @@ static struct ib_client ipoib_client = {
+ .get_net_dev_by_params = ipoib_get_net_dev_by_params,
+ };
+
++#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
++static int ipoib_netdev_event(struct notifier_block *this,
++ unsigned long event, void *ptr)
++{
++ struct netdev_notifier_info *ni = ptr;
++ struct net_device *dev = ni->dev;
++
++ if (dev->netdev_ops->ndo_open != ipoib_open)
++ return NOTIFY_DONE;
++
++ switch (event) {
++ case NETDEV_REGISTER:
++ ipoib_create_debug_files(dev);
++ break;
++ case NETDEV_CHANGENAME:
++ ipoib_delete_debug_files(dev);
++ ipoib_create_debug_files(dev);
++ break;
++ case NETDEV_UNREGISTER:
++ ipoib_delete_debug_files(dev);
++ break;
++ }
++
++ return NOTIFY_DONE;
++}
++#endif
++
+ int ipoib_open(struct net_device *dev)
+ {
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+@@ -1665,8 +1692,6 @@ void ipoib_dev_cleanup(struct net_device *dev)
+
+ ASSERT_RTNL();
+
+- ipoib_delete_debug_files(dev);
+-
+ /* Delete any child interfaces first */
+ list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
+ /* Stop GC on child */
+@@ -2085,8 +2110,6 @@ static struct net_device *ipoib_add_port(const char *format,
+ goto register_failed;
+ }
+
+- ipoib_create_debug_files(priv->dev);
+-
+ if (ipoib_cm_add_mode_attr(priv->dev))
+ goto sysfs_failed;
+ if (ipoib_add_pkey_attr(priv->dev))
+@@ -2101,7 +2124,6 @@ static struct net_device *ipoib_add_port(const char *format,
+ return priv->dev;
+
+ sysfs_failed:
+- ipoib_delete_debug_files(priv->dev);
+ unregister_netdev(priv->dev);
+
+ register_failed:
+@@ -2186,6 +2208,12 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
+ kfree(dev_list);
+ }
+
++#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
++static struct notifier_block ipoib_netdev_notifier = {
++ .notifier_call = ipoib_netdev_event,
++};
++#endif
++
+ static int __init ipoib_init_module(void)
+ {
+ int ret;
+@@ -2238,6 +2266,9 @@ static int __init ipoib_init_module(void)
+ if (ret)
+ goto err_client;
+
++#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
++ register_netdevice_notifier(&ipoib_netdev_notifier);
++#endif
+ return 0;
+
+ err_client:
+@@ -2255,6 +2286,9 @@ static int __init ipoib_init_module(void)
+
+ static void __exit ipoib_cleanup_module(void)
+ {
++#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
++ unregister_netdevice_notifier(&ipoib_netdev_notifier);
++#endif
+ ipoib_netlink_fini();
+ ib_unregister_client(&ipoib_client);
+ ib_sa_unregister_client(&ipoib_sa_client);
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+index fd811115af49..b692e9a2963c 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+@@ -87,8 +87,6 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
+ goto register_failed;
+ }
+
+- ipoib_create_debug_files(priv->dev);
+-
+ /* RTNL childs don't need proprietary sysfs entries */
+ if (type == IPOIB_LEGACY_CHILD) {
+ if (ipoib_cm_add_mode_attr(priv->dev))
+@@ -109,7 +107,6 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
+
+ sysfs_failed:
+ result = -ENOMEM;
+- ipoib_delete_debug_files(priv->dev);
+ unregister_netdevice(priv->dev);
+
+ register_failed:
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 8a9f742d8ed7..f6b9f2ce24ed 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -1649,12 +1649,16 @@ static int crypt_set_key(struct crypt_config *cc, char *key)
+
+ static int crypt_wipe_key(struct crypt_config *cc)
+ {
++ int r;
++
+ clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
+- memset(&cc->key, 0, cc->key_size * sizeof(u8));
++ get_random_bytes(&cc->key, cc->key_size);
+ kzfree(cc->key_string);
+ cc->key_string = NULL;
++ r = crypt_setkey(cc);
++ memset(&cc->key, 0, cc->key_size * sizeof(u8));
+
+- return crypt_setkey(cc);
++ return r;
+ }
+
+ static void crypt_dtr(struct dm_target *ti)
+diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
+index bf2b2676cb8a..80e3df1f1f7d 100644
+--- a/drivers/md/dm-era-target.c
++++ b/drivers/md/dm-era-target.c
+@@ -961,15 +961,15 @@ static int metadata_commit(struct era_metadata *md)
+ }
+ }
+
+- r = save_sm_root(md);
++ r = dm_tm_pre_commit(md->tm);
+ if (r) {
+- DMERR("%s: save_sm_root failed", __func__);
++ DMERR("%s: pre commit failed", __func__);
+ return r;
+ }
+
+- r = dm_tm_pre_commit(md->tm);
++ r = save_sm_root(md);
+ if (r) {
+- DMERR("%s: pre commit failed", __func__);
++ DMERR("%s: save_sm_root failed", __func__);
+ return r;
+ }
+
+diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
+index 6e702fc69a83..1153478d3545 100644
+--- a/drivers/md/dm-rq.c
++++ b/drivers/md/dm-rq.c
+@@ -961,10 +961,14 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
+ dm_init_md_queue(md);
+
+ /* backfill 'mq' sysfs registration normally done in blk_register_queue */
+- blk_mq_register_dev(disk_to_dev(md->disk), q);
++ err = blk_mq_register_dev(disk_to_dev(md->disk), q);
++ if (err)
++ goto out_cleanup_queue;
+
+ return 0;
+
++out_cleanup_queue:
++ blk_cleanup_queue(q);
+ out_tag_set:
+ blk_mq_free_tag_set(md->tag_set);
+ out_kfree_tag_set:
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index d1c05c12a9db..be869a990e38 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -1070,6 +1070,7 @@ static void passdown_endio(struct bio *bio)
+ * to unmap (we ignore err).
+ */
+ queue_passdown_pt2(bio->bi_private);
++ bio_put(bio);
+ }
+
+ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 7b0f647bcccb..34540b3055c7 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -2094,6 +2094,8 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
+ (i == r1_bio->read_disk ||
+ !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
+ continue;
++ if (test_bit(Faulty, &conf->mirrors[i].rdev->flags))
++ continue;
+
+ bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
+ if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
+diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
+index 97dd2925ed6e..4b76af2b8715 100644
+--- a/drivers/nvdimm/btt_devs.c
++++ b/drivers/nvdimm/btt_devs.c
+@@ -314,7 +314,7 @@ int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns)
+ if (rc < 0) {
+ struct nd_btt *nd_btt = to_nd_btt(btt_dev);
+
+- __nd_detach_ndns(btt_dev, &nd_btt->ndns);
++ nd_detach_ndns(btt_dev, &nd_btt->ndns);
+ put_device(btt_dev);
+ }
+
+diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
+index ca6d572c48fc..8513c8ac963b 100644
+--- a/drivers/nvdimm/claim.c
++++ b/drivers/nvdimm/claim.c
+@@ -21,8 +21,13 @@
+ void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns)
+ {
+ struct nd_namespace_common *ndns = *_ndns;
++ struct nvdimm_bus *nvdimm_bus;
+
+- lockdep_assert_held(&ndns->dev.mutex);
++ if (!ndns)
++ return;
++
++ nvdimm_bus = walk_to_nvdimm_bus(&ndns->dev);
++ lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
+ dev_WARN_ONCE(dev, ndns->claim != dev, "%s: invalid claim\n", __func__);
+ ndns->claim = NULL;
+ *_ndns = NULL;
+@@ -37,18 +42,20 @@ void nd_detach_ndns(struct device *dev,
+ if (!ndns)
+ return;
+ get_device(&ndns->dev);
+- device_lock(&ndns->dev);
++ nvdimm_bus_lock(&ndns->dev);
+ __nd_detach_ndns(dev, _ndns);
+- device_unlock(&ndns->dev);
++ nvdimm_bus_unlock(&ndns->dev);
+ put_device(&ndns->dev);
+ }
+
+ bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
+ struct nd_namespace_common **_ndns)
+ {
++ struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&attach->dev);
++
+ if (attach->claim)
+ return false;
+- lockdep_assert_held(&attach->dev.mutex);
++ lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
+ dev_WARN_ONCE(dev, *_ndns, "%s: invalid claim\n", __func__);
+ attach->claim = dev;
+ *_ndns = attach;
+@@ -61,9 +68,9 @@ bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
+ {
+ bool claimed;
+
+- device_lock(&attach->dev);
++ nvdimm_bus_lock(&attach->dev);
+ claimed = __nd_attach_ndns(dev, attach, _ndns);
+- device_unlock(&attach->dev);
++ nvdimm_bus_unlock(&attach->dev);
+ return claimed;
+ }
+
+@@ -114,7 +121,7 @@ static void nd_detach_and_reset(struct device *dev,
+ struct nd_namespace_common **_ndns)
+ {
+ /* detach the namespace and destroy / reset the device */
+- nd_detach_ndns(dev, _ndns);
++ __nd_detach_ndns(dev, _ndns);
+ if (is_idle(dev, *_ndns)) {
+ nd_device_unregister(dev, ND_ASYNC);
+ } else if (is_nd_btt(dev)) {
+@@ -184,7 +191,7 @@ ssize_t nd_namespace_store(struct device *dev,
+ }
+
+ WARN_ON_ONCE(!is_nvdimm_bus_locked(dev));
+- if (!nd_attach_ndns(dev, ndns, _ndns)) {
++ if (!__nd_attach_ndns(dev, ndns, _ndns)) {
+ dev_dbg(dev, "%s already claimed\n",
+ dev_name(&ndns->dev));
+ len = -EBUSY;
+diff --git a/drivers/nvdimm/dax_devs.c b/drivers/nvdimm/dax_devs.c
+index 45fa82cae87c..c1b6556aea6e 100644
+--- a/drivers/nvdimm/dax_devs.c
++++ b/drivers/nvdimm/dax_devs.c
+@@ -124,7 +124,7 @@ int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns)
+ dev_dbg(dev, "%s: dax: %s\n", __func__,
+ rc == 0 ? dev_name(dax_dev) : "<none>");
+ if (rc < 0) {
+- __nd_detach_ndns(dax_dev, &nd_pfn->ndns);
++ nd_detach_ndns(dax_dev, &nd_pfn->ndns);
+ put_device(dax_dev);
+ } else
+ __nd_device_register(dax_dev);
+diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
+index 6c033c9a2f06..335c8175410b 100644
+--- a/drivers/nvdimm/pfn_devs.c
++++ b/drivers/nvdimm/pfn_devs.c
+@@ -484,7 +484,7 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
+ dev_dbg(dev, "%s: pfn: %s\n", __func__,
+ rc == 0 ? dev_name(pfn_dev) : "<none>");
+ if (rc < 0) {
+- __nd_detach_ndns(pfn_dev, &nd_pfn->ndns);
++ nd_detach_ndns(pfn_dev, &nd_pfn->ndns);
+ put_device(pfn_dev);
+ } else
+ __nd_device_register(pfn_dev);
+@@ -538,7 +538,8 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+ nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
+ altmap = NULL;
+ } else if (nd_pfn->mode == PFN_MODE_PMEM) {
+- nd_pfn->npfns = (resource_size(res) - offset) / PAGE_SIZE;
++ nd_pfn->npfns = PFN_SECTION_ALIGN_UP((resource_size(res)
++ - offset) / PAGE_SIZE);
+ if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
+ dev_info(&nd_pfn->dev,
+ "number of pfns truncated from %lld to %ld\n",
+@@ -625,7 +626,8 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
+ */
+ start += start_pad;
+ size = resource_size(&nsio->res);
+- npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K;
++ npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K)
++ / PAGE_SIZE);
+ if (nd_pfn->mode == PFN_MODE_PMEM) {
+ /*
+ * vmemmap_populate_hugepages() allocates the memmap array in
+diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
+index 5b536be5a12e..0fc18262a2bc 100644
+--- a/drivers/nvdimm/pmem.c
++++ b/drivers/nvdimm/pmem.c
+@@ -388,12 +388,12 @@ static void nd_pmem_shutdown(struct device *dev)
+
+ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
+ {
+- struct pmem_device *pmem = dev_get_drvdata(dev);
+- struct nd_region *nd_region = to_region(pmem);
++ struct nd_region *nd_region;
+ resource_size_t offset = 0, end_trunc = 0;
+ struct nd_namespace_common *ndns;
+ struct nd_namespace_io *nsio;
+ struct resource res;
++ struct badblocks *bb;
+
+ if (event != NVDIMM_REVALIDATE_POISON)
+ return;
+@@ -402,20 +402,33 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
+ struct nd_btt *nd_btt = to_nd_btt(dev);
+
+ ndns = nd_btt->ndns;
+- } else if (is_nd_pfn(dev)) {
+- struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+- struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
++ nd_region = to_nd_region(ndns->dev.parent);
++ nsio = to_nd_namespace_io(&ndns->dev);
++ bb = &nsio->bb;
++ } else {
++ struct pmem_device *pmem = dev_get_drvdata(dev);
+
+- ndns = nd_pfn->ndns;
+- offset = pmem->data_offset + __le32_to_cpu(pfn_sb->start_pad);
+- end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
+- } else
+- ndns = to_ndns(dev);
++ nd_region = to_region(pmem);
++ bb = &pmem->bb;
++
++ if (is_nd_pfn(dev)) {
++ struct nd_pfn *nd_pfn = to_nd_pfn(dev);
++ struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
++
++ ndns = nd_pfn->ndns;
++ offset = pmem->data_offset +
++ __le32_to_cpu(pfn_sb->start_pad);
++ end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
++ } else {
++ ndns = to_ndns(dev);
++ }
++
++ nsio = to_nd_namespace_io(&ndns->dev);
++ }
+
+- nsio = to_nd_namespace_io(&ndns->dev);
+ res.start = nsio->res.start + offset;
+ res.end = nsio->res.end - end_trunc;
+- nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
++ nvdimm_badblocks_populate(nd_region, bb, &res);
+ }
+
+ MODULE_ALIAS("pmem");
+diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
+index b7cb5066d961..378885f4050b 100644
+--- a/drivers/nvdimm/region_devs.c
++++ b/drivers/nvdimm/region_devs.c
+@@ -968,17 +968,20 @@ EXPORT_SYMBOL_GPL(nvdimm_flush);
+ */
+ int nvdimm_has_flush(struct nd_region *nd_region)
+ {
+- struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
+ int i;
+
+ /* no nvdimm == flushing capability unknown */
+ if (nd_region->ndr_mappings == 0)
+ return -ENXIO;
+
+- for (i = 0; i < nd_region->ndr_mappings; i++)
+- /* flush hints present, flushing required */
+- if (ndrd_get_flush_wpq(ndrd, i, 0))
++ for (i = 0; i < nd_region->ndr_mappings; i++) {
++ struct nd_mapping *nd_mapping = &nd_region->mapping[i];
++ struct nvdimm *nvdimm = nd_mapping->nvdimm;
++
++ /* flush hints present / available */
++ if (nvdimm->num_flush)
+ return 1;
++ }
+
+ /*
+ * The platform defines dimm devices without hints, assume
+diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
+index 70390de66e0e..eb0a095efe9c 100644
+--- a/drivers/staging/comedi/drivers/jr3_pci.c
++++ b/drivers/staging/comedi/drivers/jr3_pci.c
+@@ -611,7 +611,7 @@ static void jr3_pci_poll_dev(unsigned long data)
+ s = &dev->subdevices[i];
+ spriv = s->private;
+
+- if (now > spriv->next_time_min) {
++ if (time_after_eq(now, spriv->next_time_min)) {
+ struct jr3_pci_poll_delay sub_delay;
+
+ sub_delay = jr3_pci_poll_subdevice(s);
+@@ -727,11 +727,12 @@ static int jr3_pci_auto_attach(struct comedi_device *dev,
+ s->insn_read = jr3_pci_ai_insn_read;
+
+ spriv = jr3_pci_alloc_spriv(dev, s);
+- if (spriv) {
+- /* Channel specific range and maxdata */
+- s->range_table_list = spriv->range_table_list;
+- s->maxdata_list = spriv->maxdata_list;
+- }
++ if (!spriv)
++ return -ENOMEM;
++
++ /* Channel specific range and maxdata */
++ s->range_table_list = spriv->range_table_list;
++ s->maxdata_list = spriv->maxdata_list;
+ }
+
+ /* Reset DSP card */
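
The jr3_pci change swaps a raw comparison of jiffies-derived values for
time_after_eq(), which stays correct when the counter wraps around. A
self-contained sketch of the signed-difference trick behind that macro;
after_eq() is an illustrative name, not a kernel symbol:

    #include <stdio.h>

    /* The idea behind time_after_eq(): compare via a signed difference
     * so wraparound of the unsigned counter does not invert the result. */
    static int after_eq(unsigned long a, unsigned long b)
    {
        return (long)(a - b) >= 0;
    }

    int main(void)
    {
        unsigned long b = (unsigned long)-10;  /* just before wraparound */
        unsigned long a = b + 20;              /* wrapped past zero */

        printf("raw a >= b:     %d\n", a >= b);          /* 0: wrong */
        printf("after_eq(a, b): %d\n", after_eq(a, b));  /* 1: correct */
        return 0;
    }
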
+diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c
+index 400969170d1c..f03e43b1b5f6 100644
+--- a/drivers/staging/gdm724x/gdm_mux.c
++++ b/drivers/staging/gdm724x/gdm_mux.c
+@@ -664,9 +664,8 @@ static int __init gdm_usb_mux_init(void)
+
+ static void __exit gdm_usb_mux_exit(void)
+ {
+- unregister_lte_tty_driver();
+-
+ usb_deregister(&gdm_mux_driver);
++ unregister_lte_tty_driver();
+ }
+
+ module_init(gdm_usb_mux_init);
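
The gdm_mux fix reorders module teardown so it mirrors init in reverse: the
USB driver is deregistered first, so nothing can call into the TTY layer
after that layer is gone. A toy demonstration of the LIFO teardown rule,
with purely illustrative function names:

    #include <stdio.h>

    static void register_tty(void)   { puts("tty registered"); }
    static void register_usb(void)   { puts("usb registered"); }
    static void deregister_usb(void) { puts("usb deregistered"); }
    static void unregister_tty(void) { puts("tty unregistered"); }

    int main(void)
    {
        /* init order... */
        register_tty();
        register_usb();

        /* ...and exit strictly in reverse, as the fix arranges. */
        deregister_usb();
        unregister_tty();
        return 0;
    }
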
+diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
+index e9b6b21f7422..f759aa8a342d 100644
+--- a/drivers/staging/vt6656/usbpipe.c
++++ b/drivers/staging/vt6656/usbpipe.c
+@@ -47,15 +47,25 @@ int vnt_control_out(struct vnt_private *priv, u8 request, u16 value,
+ u16 index, u16 length, u8 *buffer)
+ {
+ int status = 0;
++ u8 *usb_buffer;
+
+ if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags))
+ return STATUS_FAILURE;
+
+ mutex_lock(&priv->usb_lock);
+
++ usb_buffer = kmemdup(buffer, length, GFP_KERNEL);
++ if (!usb_buffer) {
++ mutex_unlock(&priv->usb_lock);
++ return -ENOMEM;
++ }
++
+ status = usb_control_msg(priv->usb,
+- usb_sndctrlpipe(priv->usb, 0), request, 0x40, value,
+- index, buffer, length, USB_CTL_WAIT);
++ usb_sndctrlpipe(priv->usb, 0),
++ request, 0x40, value,
++ index, usb_buffer, length, USB_CTL_WAIT);
++
++ kfree(usb_buffer);
+
+ mutex_unlock(&priv->usb_lock);
+
+@@ -75,15 +85,28 @@ int vnt_control_in(struct vnt_private *priv, u8 request, u16 value,
+ u16 index, u16 length, u8 *buffer)
+ {
+ int status;
++ u8 *usb_buffer;
+
+ if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags))
+ return STATUS_FAILURE;
+
+ mutex_lock(&priv->usb_lock);
+
++ usb_buffer = kmalloc(length, GFP_KERNEL);
++ if (!usb_buffer) {
++ mutex_unlock(&priv->usb_lock);
++ return -ENOMEM;
++ }
++
+ status = usb_control_msg(priv->usb,
+- usb_rcvctrlpipe(priv->usb, 0), request, 0xc0, value,
+- index, buffer, length, USB_CTL_WAIT);
++ usb_rcvctrlpipe(priv->usb, 0),
++ request, 0xc0, value,
++ index, usb_buffer, length, USB_CTL_WAIT);
++
++ if (status == length)
++ memcpy(buffer, usb_buffer, length);
++
++ kfree(usb_buffer);
+
+ mutex_unlock(&priv->usb_lock);
+
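
Both vt6656 transfer paths now bounce data through a freshly allocated
kernel buffer (kmemdup()/kmalloc()), since the buffer handed to
usb_control_msg() must be DMA-able and callers may pass stack memory. A
rough userspace sketch of the out-path shape, where do_transfer() stands in
for the actual USB call:

    #include <stdlib.h>
    #include <string.h>

    /* Stand-in for the transfer that requires a heap-backed buffer. */
    static int do_transfer(void *buf, size_t len)
    {
        (void)buf;
        (void)len;
        return 0;
    }

    /* Bounce a caller buffer (possibly on the stack) through the heap,
     * mirroring the kmemdup() pattern in vnt_control_out(). */
    static int control_out(const void *data, size_t len)
    {
        void *bounce = malloc(len);
        int ret;

        if (!bounce)
            return -1;
        memcpy(bounce, data, len);
        ret = do_transfer(bounce, len);
        free(bounce);
        return ret;
    }

    int main(void)
    {
        char on_stack[8] = "payload";

        return control_out(on_stack, sizeof(on_stack));
    }

The in path mirrors this: allocate, receive into the bounce buffer, then
copy back to the caller only when the transfer returned the expected length.
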
+diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
+index 3775706578b2..99eb12469dcb 100644
+--- a/drivers/staging/wilc1000/linux_wlan.c
++++ b/drivers/staging/wilc1000/linux_wlan.c
+@@ -1251,11 +1251,12 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
+ else
+ strcpy(ndev->name, "p2p%d");
+
+- vif->idx = wl->vif_num;
+ vif->wilc = *wilc;
+ vif->ndev = ndev;
+ wl->vif[i] = vif;
+ wl->vif_num = i;
++ vif->idx = wl->vif_num;
++
+ ndev->netdev_ops = &wilc_netdev_ops;
+
+ {
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index da2c73a255de..4d970a7bbd97 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -4673,6 +4673,7 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
+ continue;
+ }
+ atomic_set(&sess->session_reinstatement, 1);
++ atomic_set(&sess->session_fall_back_to_erl0, 1);
+ spin_unlock(&sess->conn_lock);
+
+ list_move_tail(&se_sess->sess_list, &free_list);
+diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
+index bf40f03755dd..11291c1c779a 100644
+--- a/drivers/target/iscsi/iscsi_target_configfs.c
++++ b/drivers/target/iscsi/iscsi_target_configfs.c
+@@ -1531,6 +1531,7 @@ static void lio_tpg_close_session(struct se_session *se_sess)
+ return;
+ }
+ atomic_set(&sess->session_reinstatement, 1);
++ atomic_set(&sess->session_fall_back_to_erl0, 1);
+ spin_unlock(&sess->conn_lock);
+
+ iscsit_stop_time2retain_timer(sess);
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index 450f51deb2a2..d11c9891134d 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -206,6 +206,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
+ initiatorname_param->value) &&
+ (sess_p->sess_ops->SessionType == sessiontype))) {
+ atomic_set(&sess_p->session_reinstatement, 1);
++ atomic_set(&sess_p->session_fall_back_to_erl0, 1);
+ spin_unlock(&sess_p->conn_lock);
+ iscsit_inc_session_usage_count(sess_p);
+ iscsit_stop_time2retain_timer(sess_p);
+diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
+index 87aa376a1a1a..e00050ccb61d 100644
+--- a/drivers/target/target_core_file.c
++++ b/drivers/target/target_core_file.c
+@@ -595,8 +595,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ if (ret < 0)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+- if (ret)
+- target_complete_cmd(cmd, SAM_STAT_GOOD);
++ target_complete_cmd(cmd, SAM_STAT_GOOD);
+ return 0;
+ }
+
+diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
+index 6ec5dded4ae0..cfb10b48c0cc 100644
+--- a/drivers/target/target_core_sbc.c
++++ b/drivers/target/target_core_sbc.c
+@@ -507,8 +507,11 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
+ * been failed with a non-zero SCSI status.
+ */
+ if (cmd->scsi_status) {
+- pr_err("compare_and_write_callback: non zero scsi_status:"
++ pr_debug("compare_and_write_callback: non zero scsi_status:"
+ " 0x%02x\n", cmd->scsi_status);
++ *post_ret = 1;
++ if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
++ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out;
+ }
+
+diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
+index a23fa5ed1d67..2b907385b4a8 100644
+--- a/drivers/tty/pty.c
++++ b/drivers/tty/pty.c
+@@ -216,16 +216,11 @@ static int pty_signal(struct tty_struct *tty, int sig)
+ static void pty_flush_buffer(struct tty_struct *tty)
+ {
+ struct tty_struct *to = tty->link;
+- struct tty_ldisc *ld;
+
+ if (!to)
+ return;
+
+- ld = tty_ldisc_ref(to);
+- tty_buffer_flush(to, ld);
+- if (ld)
+- tty_ldisc_deref(ld);
+-
++ tty_buffer_flush(to, NULL);
+ if (to->packet) {
+ spin_lock_irq(&tty->ctrl_lock);
+ tty->ctrl_status |= TIOCPKT_FLUSHWRITE;
+diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
+index a2a529994ba5..44e5b5bf713b 100644
+--- a/drivers/tty/serial/omap-serial.c
++++ b/drivers/tty/serial/omap-serial.c
+@@ -1712,7 +1712,8 @@ static int serial_omap_probe(struct platform_device *pdev)
+ return 0;
+
+ err_add_port:
+- pm_runtime_put(&pdev->dev);
++ pm_runtime_dont_use_autosuspend(&pdev->dev);
++ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_qos_remove_request(&up->pm_qos_request);
+ device_init_wakeup(up->dev, false);
+@@ -1725,9 +1726,13 @@ static int serial_omap_remove(struct platform_device *dev)
+ {
+ struct uart_omap_port *up = platform_get_drvdata(dev);
+
++ pm_runtime_get_sync(up->dev);
++
++ uart_remove_one_port(&serial_omap_reg, &up->port);
++
++ pm_runtime_dont_use_autosuspend(up->dev);
+ pm_runtime_put_sync(up->dev);
+ pm_runtime_disable(up->dev);
+- uart_remove_one_port(&serial_omap_reg, &up->port);
+ pm_qos_remove_request(&up->pm_qos_request);
+ device_init_wakeup(&dev->dev, false);
+
+diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
+index 3e2ef4fd7382..d65f92bcd0f1 100644
+--- a/drivers/tty/serial/samsung.c
++++ b/drivers/tty/serial/samsung.c
+@@ -906,14 +906,13 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
+ return -ENOMEM;
+ }
+
+- dma->rx_addr = dma_map_single(dma->rx_chan->device->dev, dma->rx_buf,
++ dma->rx_addr = dma_map_single(p->port.dev, dma->rx_buf,
+ dma->rx_size, DMA_FROM_DEVICE);
+
+ spin_lock_irqsave(&p->port.lock, flags);
+
+ /* TX buffer */
+- dma->tx_addr = dma_map_single(dma->tx_chan->device->dev,
+- p->port.state->xmit.buf,
++ dma->tx_addr = dma_map_single(p->port.dev, p->port.state->xmit.buf,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+
+ spin_unlock_irqrestore(&p->port.lock, flags);
+@@ -927,7 +926,7 @@ static void s3c24xx_serial_release_dma(struct s3c24xx_uart_port *p)
+
+ if (dma->rx_chan) {
+ dmaengine_terminate_all(dma->rx_chan);
+- dma_unmap_single(dma->rx_chan->device->dev, dma->rx_addr,
++ dma_unmap_single(p->port.dev, dma->rx_addr,
+ dma->rx_size, DMA_FROM_DEVICE);
+ kfree(dma->rx_buf);
+ dma_release_channel(dma->rx_chan);
+@@ -936,7 +935,7 @@ static void s3c24xx_serial_release_dma(struct s3c24xx_uart_port *p)
+
+ if (dma->tx_chan) {
+ dmaengine_terminate_all(dma->tx_chan);
+- dma_unmap_single(dma->tx_chan->device->dev, dma->tx_addr,
++ dma_unmap_single(p->port.dev, dma->tx_addr,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+ dma_release_channel(dma->tx_chan);
+ dma->tx_chan = NULL;
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index 0a6369510f2d..0b845e550fbd 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -58,7 +58,6 @@ MODULE_DEVICE_TABLE (usb, wdm_ids);
+ #define WDM_SUSPENDING 8
+ #define WDM_RESETTING 9
+ #define WDM_OVERFLOW 10
+-#define WDM_DRAIN_ON_OPEN 11
+
+ #define WDM_MAX 16
+
+@@ -182,7 +181,7 @@ static void wdm_in_callback(struct urb *urb)
+ "nonzero urb status received: -ESHUTDOWN\n");
+ goto skip_error;
+ case -EPIPE:
+- dev_dbg(&desc->intf->dev,
++ dev_err(&desc->intf->dev,
+ "nonzero urb status received: -EPIPE\n");
+ break;
+ default:
+@@ -210,25 +209,6 @@ static void wdm_in_callback(struct urb *urb)
+ desc->reslength = length;
+ }
+ }
+-
+- /*
+- * Handling devices with the WDM_DRAIN_ON_OPEN flag set:
+- * If desc->resp_count is unset, then the urb was submitted
+- * without a prior notification. If the device returned any
+- * data, then this implies that it had messages queued without
+- * notifying us. Continue reading until that queue is flushed.
+- */
+- if (!desc->resp_count) {
+- if (!length) {
+- /* do not propagate the expected -EPIPE */
+- desc->rerr = 0;
+- goto unlock;
+- }
+- dev_dbg(&desc->intf->dev, "got %d bytes without notification\n", length);
+- set_bit(WDM_RESPONDING, &desc->flags);
+- usb_submit_urb(desc->response, GFP_ATOMIC);
+- }
+-
+ skip_error:
+ set_bit(WDM_READ, &desc->flags);
+ wake_up(&desc->wait);
+@@ -243,7 +223,6 @@ static void wdm_in_callback(struct urb *urb)
+ service_outstanding_interrupt(desc);
+ }
+
+-unlock:
+ spin_unlock(&desc->iuspin);
+ }
+
+@@ -686,17 +665,6 @@ static int wdm_open(struct inode *inode, struct file *file)
+ dev_err(&desc->intf->dev,
+ "Error submitting int urb - %d\n", rv);
+ rv = usb_translate_errors(rv);
+- } else if (test_bit(WDM_DRAIN_ON_OPEN, &desc->flags)) {
+- /*
+- * Some devices keep pending messages queued
+- * without resending notifications. We must
+- * flush the message queue before we can
+- * assume a one-to-one relationship between
+- * notifications and messages in the queue
+- */
+- dev_dbg(&desc->intf->dev, "draining queued data\n");
+- set_bit(WDM_RESPONDING, &desc->flags);
+- rv = usb_submit_urb(desc->response, GFP_KERNEL);
+ }
+ } else {
+ rv = 0;
+@@ -803,8 +771,7 @@ static void wdm_rxwork(struct work_struct *work)
+ /* --- hotplug --- */
+
+ static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor *ep,
+- u16 bufsize, int (*manage_power)(struct usb_interface *, int),
+- bool drain_on_open)
++ u16 bufsize, int (*manage_power)(struct usb_interface *, int))
+ {
+ int rv = -ENOMEM;
+ struct wdm_device *desc;
+@@ -891,68 +858,6 @@ static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor
+
+ desc->manage_power = manage_power;
+
+- /*
+- * "drain_on_open" enables a hack to work around a firmware
+- * issue observed on network functions, in particular MBIM
+- * functions.
+- *
+- * Quoting section 7 of the CDC-WMC r1.1 specification:
+- *
+- * "The firmware shall interpret GetEncapsulatedResponse as a
+- * request to read response bytes. The firmware shall send
+- * the next wLength bytes from the response. The firmware
+- * shall allow the host to retrieve data using any number of
+- * GetEncapsulatedResponse requests. The firmware shall
+- * return a zero- length reply if there are no data bytes
+- * available.
+- *
+- * The firmware shall send ResponseAvailable notifications
+- * periodically, using any appropriate algorithm, to inform
+- * the host that there is data available in the reply
+- * buffer. The firmware is allowed to send ResponseAvailable
+- * notifications even if there is no data available, but
+- * this will obviously reduce overall performance."
+- *
+- * These requirements, although they make equally sense, are
+- * often not implemented by network functions. Some firmwares
+- * will queue data indefinitely, without ever resending a
+- * notification. The result is that the driver and firmware
+- * loses "syncronization" if the driver ever fails to respond
+- * to a single notification, something which easily can happen
+- * on release(). When this happens, the driver will appear to
+- * never receive notifications for the most current data. Each
+- * notification will only cause a single read, which returns
+- * the oldest data in the firmware's queue.
+- *
+- * The "drain_on_open" hack resolves the situation by draining
+- * data from the firmware until none is returned, without a
+- * prior notification.
+- *
+- * This will inevitably race with the firmware, risking that
+- * we read data from the device before handling the associated
+- * notification. To make things worse, some of the devices
+- * needing the hack do not implement the "return zero if no
+- * data is available" requirement either. Instead they return
+- * an error on the subsequent read in this case. This means
+- * that "winning" the race can cause an unexpected EIO to
+- * userspace.
+- *
+- * "winning" the race is more likely on resume() than on
+- * open(), and the unexpected error is more harmful in the
+- * middle of an open session. The hack is therefore only
+- * applied on open(), and not on resume() where it logically
+- * would be equally necessary. So we define open() as the only
+- * driver <-> device "syncronization point". Should we happen
+- * to lose a notification after open(), then syncronization
+- * will be lost until release()
+- *
+- * The hack should not be enabled for CDC WDM devices
+- * conforming to the CDC-WMC r1.1 specification. This is
+- * ensured by setting drain_on_open to false in wdm_probe().
+- */
+- if (drain_on_open)
+- set_bit(WDM_DRAIN_ON_OPEN, &desc->flags);
+-
+ spin_lock(&wdm_device_list_lock);
+ list_add(&desc->device_list, &wdm_device_list);
+ spin_unlock(&wdm_device_list_lock);
+@@ -1006,7 +911,7 @@ static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ goto err;
+ ep = &iface->endpoint[0].desc;
+
+- rv = wdm_create(intf, ep, maxcom, &wdm_manage_power, false);
++ rv = wdm_create(intf, ep, maxcom, &wdm_manage_power);
+
+ err:
+ return rv;
+@@ -1038,7 +943,7 @@ struct usb_driver *usb_cdc_wdm_register(struct usb_interface *intf,
+ {
+ int rv = -EINVAL;
+
+- rv = wdm_create(intf, ep, bufsize, manage_power, true);
++ rv = wdm_create(intf, ep, bufsize, manage_power);
+ if (rv < 0)
+ goto err;
+
+diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
+index cdee5130638b..eb87a259d55c 100644
+--- a/drivers/usb/core/driver.c
++++ b/drivers/usb/core/driver.c
+@@ -1331,6 +1331,24 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
+ */
+ if (udev->parent && !PMSG_IS_AUTO(msg))
+ status = 0;
++
++ /*
++ * If the device is inaccessible, don't try to resume
++ * suspended interfaces and just return the error.
++ */
++ if (status && status != -EBUSY) {
++ int err;
++ u16 devstat;
++
++ err = usb_get_status(udev, USB_RECIP_DEVICE, 0,
++ &devstat);
++ if (err) {
++ dev_err(&udev->dev,
++ "Failed to suspend device, error %d\n",
++ status);
++ goto done;
++ }
++ }
+ }
+
+ /* If the suspend failed, resume interfaces that did get suspended */
+@@ -1763,6 +1781,9 @@ static int autosuspend_check(struct usb_device *udev)
+ int w, i;
+ struct usb_interface *intf;
+
++ if (udev->state == USB_STATE_NOTATTACHED)
++ return -ENODEV;
++
+ /* Fail if autosuspend is disabled, or any interfaces are in use, or
+ * any interface drivers require remote wakeup but it isn't available.
+ */
+diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
+index e26bd5e773ad..87ad6b6bfee8 100644
+--- a/drivers/usb/core/file.c
++++ b/drivers/usb/core/file.c
+@@ -29,6 +29,7 @@
+ #define MAX_USB_MINORS 256
+ static const struct file_operations *usb_minors[MAX_USB_MINORS];
+ static DECLARE_RWSEM(minor_rwsem);
++static DEFINE_MUTEX(init_usb_class_mutex);
+
+ static int usb_open(struct inode *inode, struct file *file)
+ {
+@@ -111,8 +112,9 @@ static void release_usb_class(struct kref *kref)
+
+ static void destroy_usb_class(void)
+ {
+- if (usb_class)
+- kref_put(&usb_class->kref, release_usb_class);
++ mutex_lock(&init_usb_class_mutex);
++ kref_put(&usb_class->kref, release_usb_class);
++ mutex_unlock(&init_usb_class_mutex);
+ }
+
+ int usb_major_init(void)
+@@ -173,7 +175,10 @@ int usb_register_dev(struct usb_interface *intf,
+ if (intf->minor >= 0)
+ return -EADDRINUSE;
+
++ mutex_lock(&init_usb_class_mutex);
+ retval = init_usb_class();
++ mutex_unlock(&init_usb_class_mutex);
++
+ if (retval)
+ return retval;
+
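
usb_register_dev() and destroy_usb_class() now serialize on
init_usb_class_mutex around the lazily created, kref-counted usb_class, so
two concurrent registrants can no longer both observe it unset and race to
create it. A minimal pthreads sketch of that guarded lazy-init shape; all
names here are illustrative:

    #include <pthread.h>
    #include <stdlib.h>

    struct class_demo { int refcount; };

    static struct class_demo *the_class;
    static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Create-or-reference under the lock, as init_usb_class() is now
     * only called with init_usb_class_mutex held. */
    static struct class_demo *get_class(void)
    {
        pthread_mutex_lock(&init_lock);
        if (!the_class) {
            the_class = calloc(1, sizeof(*the_class));
            if (the_class)
                the_class->refcount = 1;
        } else {
            the_class->refcount++;
        }
        pthread_mutex_unlock(&init_lock);
        return the_class;
    }

    static void put_class(void)
    {
        pthread_mutex_lock(&init_lock);
        if (the_class && --the_class->refcount == 0) {
            free(the_class);
            the_class = NULL;
        }
        pthread_mutex_unlock(&init_lock);
    }

    int main(void)
    {
        if (!get_class())
            return 1;
        put_class();
        return 0;
    }
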
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 48fbf523d186..4e87b94cca92 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -1066,6 +1066,9 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
+
+ portstatus = portchange = 0;
+ status = hub_port_status(hub, port1, &portstatus, &portchange);
++ if (status)
++ goto abort;
++
+ if (udev || (portstatus & USB_PORT_STAT_CONNECTION))
+ dev_dbg(&port_dev->dev, "status %04x change %04x\n",
+ portstatus, portchange);
+@@ -1198,7 +1201,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
+
+ /* Scan all ports that need attention */
+ kick_hub_wq(hub);
+-
++ abort:
+ if (type == HUB_INIT2 || type == HUB_INIT3) {
+ /* Allow autosuspend if it was suppressed */
+ disconnected:
+@@ -2084,6 +2087,12 @@ void usb_disconnect(struct usb_device **pdev)
+ dev_info(&udev->dev, "USB disconnect, device number %d\n",
+ udev->devnum);
+
++ /*
++ * Ensure that the pm runtime code knows that the USB device
++ * is in the process of being disconnected.
++ */
++ pm_runtime_barrier(&udev->dev);
++
+ usb_lock_device(udev);
+
+ hub_disconnect_children(udev);
+diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
+index 8ad203296079..f3ee80ece682 100644
+--- a/drivers/usb/gadget/Kconfig
++++ b/drivers/usb/gadget/Kconfig
+@@ -460,6 +460,7 @@ config USB_CONFIGFS_F_TCM
+ choice
+ tristate "USB Gadget Drivers"
+ default USB_ETH
++ optional
+ help
+ A Linux "Gadget Driver" talks to the USB Peripheral Controller
+ driver through the abstract "gadget" API. Some other operating
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 8414ed2a02de..0d11dbd04de8 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1493,6 +1493,17 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
+ */
+ max_esit_payload = xhci_get_max_esit_payload(udev, ep);
+ interval = xhci_get_endpoint_interval(udev, ep);
++
++ /* Periodic endpoint bInterval limit quirk */
++ if (usb_endpoint_xfer_int(&ep->desc) ||
++ usb_endpoint_xfer_isoc(&ep->desc)) {
++ if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) &&
++ udev->speed >= USB_SPEED_HIGH &&
++ interval >= 7) {
++ interval = 6;
++ }
++ }
++
+ mult = xhci_get_endpoint_mult(udev, ep);
+ max_packet = usb_endpoint_maxp(&ep->desc);
+ max_burst = xhci_get_endpoint_max_burst(udev, ep);
+@@ -2474,7 +2485,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+ (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
+ xhci->cmd_ring->cycle_state;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+- "// Setting command ring address to 0x%x", val);
++ "// Setting command ring address to 0x%016llx", val_64);
+ xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
+ xhci_dbg_cmd_ptrs(xhci);
+
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 954abfd5014d..93f566cb968b 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -199,6 +199,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ pdev->device == 0x1042)
+ xhci->quirks |= XHCI_BROKEN_STREAMS;
+
++ if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241)
++ xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7;
++
+ if (xhci->quirks & XHCI_RESET_ON_RESUME)
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "QUIRK: Resetting on resume");
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index ea18bf49c2eb..98eb5d50e03d 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1652,6 +1652,7 @@ struct xhci_hcd {
+ #define XHCI_MISSING_CAS (1 << 24)
+ /* For controller with a broken Port Disable implementation */
+ #define XHCI_BROKEN_PORT_PED (1 << 25)
++#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26)
+
+ unsigned int num_active_eps;
+ unsigned int limit_active_eps;
+diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
+index 3525626bf086..7bfb6b78f3a3 100644
+--- a/drivers/usb/misc/usbtest.c
++++ b/drivers/usb/misc/usbtest.c
+@@ -159,6 +159,7 @@ get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
+ case USB_ENDPOINT_XFER_INT:
+ if (dev->info->intr)
+ goto try_intr;
++ continue;
+ case USB_ENDPOINT_XFER_ISOC:
+ if (dev->info->iso)
+ goto try_iso;
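
The usbtest hunk adds a continue so the USB_ENDPOINT_XFER_INT case no
longer falls through into the isochronous case when interrupt transfers are
not wanted. A small demo of the failure mode, modeling the added continue
as an early return; classify() is an invented helper:

    #include <stdio.h>

    /* Without a break or continue, case 1 falls straight into case 2,
     * which is the bug the hunk closes. */
    static const char *classify(int type, int fixed)
    {
        switch (type) {
        case 1:
            if (fixed)
                return "int: skipped";
            /* falls through when not fixed */
        case 2:
            return "iso";
        }
        return "other";
    }

    int main(void)
    {
        printf("buggy: %s\n", classify(1, 0));  /* "iso", the wrong path */
        printf("fixed: %s\n", classify(1, 1));  /* "int: skipped" */
        return 0;
    }
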
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index c6c388bed156..b3c97c0cc882 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -873,6 +873,7 @@ static const struct usb_device_id id_table_combined[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID,
+ USB_CLASS_VENDOR_SPEC,
+ USB_SUBCLASS_VENDOR_SPEC, 0x00) },
++ { USB_DEVICE_INTERFACE_NUMBER(ACTEL_VID, MICROSEMI_ARROW_SF2PLUS_BOARD_PID, 2) },
+ { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
+ { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 48ee04c94a75..71fb9e59db71 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -873,6 +873,12 @@
+ #define FIC_VID 0x1457
+ #define FIC_NEO1973_DEBUG_PID 0x5118
+
++/*
++ * Actel / Microsemi
++ */
++#define ACTEL_VID 0x1514
++#define MICROSEMI_ARROW_SF2PLUS_BOARD_PID 0x2008
++
+ /* Olimex */
+ #define OLIMEX_VID 0x15BA
+ #define OLIMEX_ARM_USB_OCD_PID 0x0003
+diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
+index b3cc33fa6d26..5ecd400dcf8c 100644
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -243,69 +243,46 @@ static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn)
+ return ret;
+ }
+
+-struct vwork {
+- struct mm_struct *mm;
+- long npage;
+- struct work_struct work;
+-};
+-
+-/* delayed decrement/increment for locked_vm */
+-static void vfio_lock_acct_bg(struct work_struct *work)
+-{
+- struct vwork *vwork = container_of(work, struct vwork, work);
+- struct mm_struct *mm;
+-
+- mm = vwork->mm;
+- down_write(&mm->mmap_sem);
+- mm->locked_vm += vwork->npage;
+- up_write(&mm->mmap_sem);
+- mmput(mm);
+- kfree(vwork);
+-}
+-
+-static void vfio_lock_acct(struct task_struct *task, long npage)
++static int vfio_lock_acct(struct task_struct *task, long npage, bool *lock_cap)
+ {
+- struct vwork *vwork;
+ struct mm_struct *mm;
+ bool is_current;
++ int ret;
+
+ if (!npage)
+- return;
++ return 0;
+
+ is_current = (task->mm == current->mm);
+
+ mm = is_current ? task->mm : get_task_mm(task);
+ if (!mm)
+- return; /* process exited */
++ return -ESRCH; /* process exited */
+
+- if (down_write_trylock(&mm->mmap_sem)) {
+- mm->locked_vm += npage;
+- up_write(&mm->mmap_sem);
+- if (!is_current)
+- mmput(mm);
+- return;
+- }
++ ret = down_write_killable(&mm->mmap_sem);
++ if (!ret) {
++ if (npage > 0) {
++ if (lock_cap ? !*lock_cap :
++ !has_capability(task, CAP_IPC_LOCK)) {
++ unsigned long limit;
++
++ limit = task_rlimit(task,
++ RLIMIT_MEMLOCK) >> PAGE_SHIFT;
++
++ if (mm->locked_vm + npage > limit)
++ ret = -ENOMEM;
++ }
++ }
++
++ if (!ret)
++ mm->locked_vm += npage;
+
+- if (is_current) {
+- mm = get_task_mm(task);
+- if (!mm)
+- return;
++ up_write(&mm->mmap_sem);
+ }
+
+- /*
+- * Couldn't get mmap_sem lock, so must setup to update
+- * mm->locked_vm later. If locked_vm were atomic, we
+- * wouldn't need this silliness
+- */
+- vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL);
+- if (WARN_ON(!vwork)) {
++ if (!is_current)
+ mmput(mm);
+- return;
+- }
+- INIT_WORK(&vwork->work, vfio_lock_acct_bg);
+- vwork->mm = mm;
+- vwork->npage = npage;
+- schedule_work(&vwork->work);
++
++ return ret;
+ }
+
+ /*
+@@ -402,7 +379,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
+ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
+ long npage, unsigned long *pfn_base)
+ {
+- unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
++ unsigned long pfn = 0, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ bool lock_cap = capable(CAP_IPC_LOCK);
+ long ret, pinned = 0, lock_acct = 0;
+ bool rsvd;
+@@ -439,8 +416,6 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
+ /* Lock all the consecutive pages from pfn_base */
+ for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage;
+ pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
+- unsigned long pfn = 0;
+-
+ ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn);
+ if (ret)
+ break;
+@@ -457,14 +432,25 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
+ put_pfn(pfn, dma->prot);
+ pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
+ __func__, limit << PAGE_SHIFT);
+- break;
++ ret = -ENOMEM;
++ goto unpin_out;
+ }
+ lock_acct++;
+ }
+ }
+
+ out:
+- vfio_lock_acct(current, lock_acct);
++ ret = vfio_lock_acct(current, lock_acct, &lock_cap);
++
++unpin_out:
++ if (ret) {
++ if (!rsvd) {
++ for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
++ put_pfn(pfn, dma->prot);
++ }
++
++ return ret;
++ }
+
+ return pinned;
+ }
+@@ -485,7 +471,7 @@ static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
+ }
+
+ if (do_accounting)
+- vfio_lock_acct(dma->task, locked - unlocked);
++ vfio_lock_acct(dma->task, locked - unlocked, NULL);
+
+ return unlocked;
+ }
+@@ -519,8 +505,14 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
+ goto pin_page_exit;
+ }
+
+- if (!rsvd && do_accounting)
+- vfio_lock_acct(dma->task, 1);
++ if (!rsvd && do_accounting) {
++ ret = vfio_lock_acct(dma->task, 1, &lock_cap);
++ if (ret) {
++ put_pfn(*pfn_base, dma->prot);
++ goto pin_page_exit;
++ }
++ }
++
+ ret = 1;
+
+ pin_page_exit:
+@@ -540,7 +532,7 @@ static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova,
+ unlocked = vfio_iova_put_vfio_pfn(dma, vpfn);
+
+ if (do_accounting)
+- vfio_lock_acct(dma->task, -unlocked);
++ vfio_lock_acct(dma->task, -unlocked, NULL);
+
+ return unlocked;
+ }
+@@ -737,7 +729,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
+
+ dma->iommu_mapped = false;
+ if (do_accounting) {
+- vfio_lock_acct(dma->task, -unlocked);
++ vfio_lock_acct(dma->task, -unlocked, NULL);
+ return 0;
+ }
+ return unlocked;
+@@ -1346,7 +1338,7 @@ static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu)
+ if (!is_invalid_reserved_pfn(vpfn->pfn))
+ locked++;
+ }
+- vfio_lock_acct(dma->task, locked - unlocked);
++ vfio_lock_acct(dma->task, locked - unlocked, NULL);
+ }
+ }
+
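
The vfio rework moves RLIMIT_MEMLOCK enforcement into vfio_lock_acct()
itself: the mmap semaphore is taken killably, accounting that would exceed
the task's limit is refused unless CAP_IPC_LOCK is held, and an error is
returned so callers can unpin what they pinned. A loose userspace sketch of
just the limit check, with getrlimit() in place of task_rlimit() and a
plain counter standing in for mm->locked_vm:

    #include <stdio.h>
    #include <sys/resource.h>

    #define PAGE_SHIFT 12

    static unsigned long locked_vm;  /* stand-in for mm->locked_vm */

    /* Refuse accounting past RLIMIT_MEMLOCK, as the reworked
     * vfio_lock_acct() does for tasks without CAP_IPC_LOCK. */
    static int lock_acct(long npage)
    {
        struct rlimit rl;

        if (getrlimit(RLIMIT_MEMLOCK, &rl))
            return -1;
        if (npage > 0 &&
            locked_vm + npage > (unsigned long)(rl.rlim_cur >> PAGE_SHIFT))
            return -1;  /* -ENOMEM in the kernel */
        locked_vm += npage;
        return 0;
    }

    int main(void)
    {
        printf("one page:   %d\n", lock_acct(1));
        printf("absurd ask: %d\n", lock_acct(1L << 40));
        return 0;
    }
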
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index b94e2a4974a1..68a5c769495f 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -103,12 +103,11 @@ void invalidate_bdev(struct block_device *bdev)
+ {
+ struct address_space *mapping = bdev->bd_inode->i_mapping;
+
+- if (mapping->nrpages == 0)
+- return;
+-
+- invalidate_bh_lrus();
+- lru_add_drain_all(); /* make sure all lru add caches are flushed */
+- invalidate_mapping_pages(mapping, 0, -1);
++ if (mapping->nrpages) {
++ invalidate_bh_lrus();
++ lru_add_drain_all(); /* make sure all lru add caches are flushed */
++ invalidate_mapping_pages(mapping, 0, -1);
++ }
+ /* 99% of the time, we don't need to flush the cleancache on the bdev.
+ * But, for the strange corners, lets be cautious
+ */
+diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
+index febc28f9e2c2..75267cdd5dfd 100644
+--- a/fs/ceph/xattr.c
++++ b/fs/ceph/xattr.c
+@@ -392,6 +392,7 @@ static int __set_xattr(struct ceph_inode_info *ci,
+
+ if (update_xattr) {
+ int err = 0;
++
+ if (xattr && (flags & XATTR_CREATE))
+ err = -EEXIST;
+ else if (!xattr && (flags & XATTR_REPLACE))
+@@ -399,12 +400,14 @@ static int __set_xattr(struct ceph_inode_info *ci,
+ if (err) {
+ kfree(name);
+ kfree(val);
++ kfree(*newxattr);
+ return err;
+ }
+ if (update_xattr < 0) {
+ if (xattr)
+ __remove_xattr(ci, xattr);
+ kfree(name);
++ kfree(*newxattr);
+ return 0;
+ }
+ }
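
The ceph hunks plug a leak: the early-return paths of __set_xattr() freed
name and val but forgot the preallocated *newxattr. A tiny illustration of
the underlying rule, that every early exit must release everything
allocated so far; set_xattr() and its fail parameter are invented for the
demo, which frees unconditionally (the real code hands newxattr off on
success):

    #include <stdlib.h>

    struct xattr_demo { int dummy; };

    static int set_xattr(int fail)
    {
        char *name = malloc(16);
        char *val = malloc(16);
        struct xattr_demo *newxattr = malloc(sizeof(*newxattr));
        int err = 0;

        if (!name || !val || !newxattr)
            err = -1;
        else if (fail)  /* e.g. XATTR_CREATE on an existing key */
            err = -1;

        /* the fix: on error, release newxattr too, not just name/val */
        free(name);
        free(val);
        free(newxattr);
        return err;
    }

    int main(void)
    {
        return set_xattr(1) == -1 ? 0 : 1;
    }
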
+diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
+index 02b071bf3732..a0b3e7d1be48 100644
+--- a/fs/cifs/cifs_unicode.c
++++ b/fs/cifs/cifs_unicode.c
+@@ -83,6 +83,9 @@ convert_sfm_char(const __u16 src_char, char *target)
+ case SFM_COLON:
+ *target = ':';
+ break;
++ case SFM_DOUBLEQUOTE:
++ *target = '"';
++ break;
+ case SFM_ASTERISK:
+ *target = '*';
+ break;
+@@ -418,6 +421,9 @@ static __le16 convert_to_sfm_char(char src_char, bool end_of_string)
+ case ':':
+ dest_char = cpu_to_le16(SFM_COLON);
+ break;
++ case '"':
++ dest_char = cpu_to_le16(SFM_DOUBLEQUOTE);
++ break;
+ case '*':
+ dest_char = cpu_to_le16(SFM_ASTERISK);
+ break;
+diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
+index 479bc0a941f3..07ade707fa60 100644
+--- a/fs/cifs/cifs_unicode.h
++++ b/fs/cifs/cifs_unicode.h
+@@ -57,6 +57,7 @@
+ * not conflict (although almost does) with the mapping above.
+ */
+
++#define SFM_DOUBLEQUOTE ((__u16) 0xF020)
+ #define SFM_ASTERISK ((__u16) 0xF021)
+ #define SFM_QUESTION ((__u16) 0xF025)
+ #define SFM_COLON ((__u16) 0xF022)
+@@ -64,8 +65,8 @@
+ #define SFM_LESSTHAN ((__u16) 0xF023)
+ #define SFM_PIPE ((__u16) 0xF027)
+ #define SFM_SLASH ((__u16) 0xF026)
+-#define SFM_PERIOD ((__u16) 0xF028)
+-#define SFM_SPACE ((__u16) 0xF029)
++#define SFM_SPACE ((__u16) 0xF028)
++#define SFM_PERIOD ((__u16) 0xF029)
+
+ /*
+ * Mapping mechanism to use when one of the seven reserved characters is
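
Besides adding the double-quote mapping, the header change swaps SFM_SPACE
and SFM_PERIOD so the defines agree with the mapping the remote end uses
(0xF028 for space, 0xF029 for period); with the old values, trailing spaces
and periods were translated to the wrong characters against a conforming
peer. A self-contained check of the corrected pairs, where encode() and
decode() are illustrative reductions of convert_to_sfm_char() and
convert_sfm_char():

    #include <assert.h>
    #include <stdio.h>

    /* Corrected values from cifs_unicode.h. */
    #define SFM_DOUBLEQUOTE 0xF020
    #define SFM_SPACE       0xF028
    #define SFM_PERIOD      0xF029

    static unsigned short encode(char c)
    {
        switch (c) {
        case '"': return SFM_DOUBLEQUOTE;
        case ' ': return SFM_SPACE;
        case '.': return SFM_PERIOD;
        default:  return (unsigned short)c;
        }
    }

    static char decode(unsigned short u)
    {
        switch (u) {
        case SFM_DOUBLEQUOTE: return '"';
        case SFM_SPACE:       return ' ';
        case SFM_PERIOD:      return '.';
        default:              return (char)u;
        }
    }

    int main(void)
    {
        const char *probe = "a. \"";
        const char *p;

        /* Both sides must use the shared convention; a local round trip
         * alone would not have caught the swapped defines, since the
         * peer decodes with its own (correct) table. */
        for (p = probe; *p; p++)
            assert(decode(encode(*p)) == *p);
        printf("round trip ok\n");
        return 0;
    }
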
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index 70f4e65fced2..16b532acc13d 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -87,6 +87,7 @@ extern mempool_t *cifs_req_poolp;
+ extern mempool_t *cifs_mid_poolp;
+
+ struct workqueue_struct *cifsiod_wq;
++struct workqueue_struct *cifsoplockd_wq;
+ __u32 cifs_lock_secret;
+
+ /*
+@@ -1282,9 +1283,16 @@ init_cifs(void)
+ goto out_clean_proc;
+ }
+
++ cifsoplockd_wq = alloc_workqueue("cifsoplockd",
++ WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
++ if (!cifsoplockd_wq) {
++ rc = -ENOMEM;
++ goto out_destroy_cifsiod_wq;
++ }
++
+ rc = cifs_fscache_register();
+ if (rc)
+- goto out_destroy_wq;
++ goto out_destroy_cifsoplockd_wq;
+
+ rc = cifs_init_inodecache();
+ if (rc)
+@@ -1332,7 +1340,9 @@ init_cifs(void)
+ cifs_destroy_inodecache();
+ out_unreg_fscache:
+ cifs_fscache_unregister();
+-out_destroy_wq:
++out_destroy_cifsoplockd_wq:
++ destroy_workqueue(cifsoplockd_wq);
++out_destroy_cifsiod_wq:
+ destroy_workqueue(cifsiod_wq);
+ out_clean_proc:
+ cifs_proc_clean();
+@@ -1355,6 +1365,7 @@ exit_cifs(void)
+ cifs_destroy_mids();
+ cifs_destroy_inodecache();
+ cifs_fscache_unregister();
++ destroy_workqueue(cifsoplockd_wq);
+ destroy_workqueue(cifsiod_wq);
+ cifs_proc_clean();
+ }
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 7fa45f48e59d..aba772a82bf2 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -1655,6 +1655,7 @@ void cifs_oplock_break(struct work_struct *work);
+
+ extern const struct slow_work_ops cifs_oplock_break_ops;
+ extern struct workqueue_struct *cifsiod_wq;
++extern struct workqueue_struct *cifsoplockd_wq;
+ extern __u32 cifs_lock_secret;
+
+ extern mempool_t *cifs_mid_poolp;
+diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
+index 2dc92351027b..a1016485468d 100644
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -717,6 +717,9 @@ CIFSSMBEcho(struct TCP_Server_Info *server)
+ if (rc)
+ return rc;
+
++ if (server->capabilities & CAP_UNICODE)
++ smb->hdr.Flags2 |= SMBFLG2_UNICODE;
++
+ /* set up echo request */
+ smb->hdr.Tid = 0xffff;
+ smb->hdr.WordCount = 1;
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index acf7bc1eab77..cfb3b5a50005 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -2879,16 +2879,14 @@ match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data)
+ {
+ struct cifs_sb_info *old = CIFS_SB(sb);
+ struct cifs_sb_info *new = mnt_data->cifs_sb;
++ bool old_set = old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH;
++ bool new_set = new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH;
+
+- if (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) {
+- if (!(new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH))
+- return 0;
+- /* The prepath should be null terminated strings */
+- if (strcmp(new->prepath, old->prepath))
+- return 0;
+-
++ if (old_set && new_set && !strcmp(new->prepath, old->prepath))
+ return 1;
+- }
++ else if (!old_set && !new_set)
++ return 1;
++
+ return 0;
+ }
+
+diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
+index 001528781b6b..bdba9e7a9438 100644
+--- a/fs/cifs/ioctl.c
++++ b/fs/cifs/ioctl.c
+@@ -264,10 +264,14 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
+ rc = -EOPNOTSUPP;
+ break;
+ case CIFS_IOC_GET_MNT_INFO:
++ if (pSMBFile == NULL)
++ break;
+ tcon = tlink_tcon(pSMBFile->tlink);
+ rc = smb_mnt_get_fsinfo(xid, tcon, (void __user *)arg);
+ break;
+ case CIFS_ENUMERATE_SNAPSHOTS:
++ if (pSMBFile == NULL)
++ break;
+ if (arg == 0) {
+ rc = -EINVAL;
+ goto cifs_ioc_exit;
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
+index c6729156f9a0..5419afea0a36 100644
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -492,7 +492,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
+ CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
+ &pCifsInode->flags);
+
+- queue_work(cifsiod_wq,
++ queue_work(cifsoplockd_wq,
+ &netfile->oplock_break);
+ netfile->oplock_break_cancelled = false;
+
+diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
+index 97307808ae42..967dfe656ced 100644
+--- a/fs/cifs/smb2misc.c
++++ b/fs/cifs/smb2misc.c
+@@ -494,7 +494,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
+ else
+ cfile->oplock_break_cancelled = true;
+
+- queue_work(cifsiod_wq, &cfile->oplock_break);
++ queue_work(cifsoplockd_wq, &cfile->oplock_break);
+ kfree(lw);
+ return true;
+ }
+@@ -638,7 +638,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
+ CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
+ &cinode->flags);
+ spin_unlock(&cfile->file_info_lock);
+- queue_work(cifsiod_wq, &cfile->oplock_break);
++ queue_work(cifsoplockd_wq,
++ &cfile->oplock_break);
+
+ spin_unlock(&tcon->open_file_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 007abf7195af..36334fe3266c 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -924,6 +924,7 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
+ }
+ if (snapshot_in.snapshot_array_size < sizeof(struct smb_snapshot_array)) {
+ rc = -ERANGE;
++ kfree(retbuf);
+ return rc;
+ }
+
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 802185386851..7c1c6c39d582 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -569,8 +569,12 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
+ }
+
+ if (rsplen != sizeof(struct validate_negotiate_info_rsp)) {
+- cifs_dbg(VFS, "invalid size of protocol negotiate response\n");
+- return -EIO;
++ cifs_dbg(VFS, "invalid protocol negotiate response size: %d\n",
++ rsplen);
++
++ /* relax check since Mac returns max bufsize allowed on ioctl */
++ if (rsplen > CIFSMaxBufSize)
++ return -EIO;
+ }
+
+ /* check validate negotiate info response matches what we got earlier */
+@@ -1670,8 +1674,12 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
+ * than one credit. Windows typically sets this smaller, but for some
+ * ioctls it may be useful to allow server to send more. No point
+ * limiting what the server can send as long as fits in one credit
++ * Unfortunately, we cannot handle more than CIFS_MAX_MSG_SIZE
++ * (the default; note that it can be overridden to make the max larger)
++ * in responses, except for read responses, which can be bigger.
++ * We may want to bump this limit up.
+ */
+- req->MaxOutputResponse = cpu_to_le32(0xFF00); /* < 64K uses 1 credit */
++ req->MaxOutputResponse = cpu_to_le32(CIFSMaxBufSize);
+
+ if (is_fsctl)
+ req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
+diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
+index d6cd7ea4851d..ef76f93c50cb 100644
+--- a/fs/crypto/policy.c
++++ b/fs/crypto/policy.c
+@@ -169,27 +169,61 @@ int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg)
+ }
+ EXPORT_SYMBOL(fscrypt_ioctl_get_policy);
+
++/**
++ * fscrypt_has_permitted_context() - is a file's encryption policy permitted
++ * within its directory?
++ *
++ * @parent: inode for parent directory
++ * @child: inode for file being looked up, opened, or linked into @parent
++ *
++ * Filesystems must call this before permitting access to an inode in a
++ * situation where the parent directory is encrypted (either before allowing
++ * ->lookup() to succeed, or for a regular file before allowing it to be opened)
++ * and before any operation that involves linking an inode into an encrypted
++ * directory, including link, rename, and cross rename. It enforces the
++ * constraint that within a given encrypted directory tree, all files use the
++ * same encryption policy. The pre-access check is needed to detect potentially
++ * malicious offline violations of this constraint, while the link and rename
++ * checks are needed to prevent online violations of this constraint.
++ *
++ * Return: 1 if permitted, 0 if forbidden. If forbidden, the caller must fail
++ * the filesystem operation with EPERM.
++ */
+ int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
+ {
+- struct fscrypt_info *parent_ci, *child_ci;
++ const struct fscrypt_operations *cops = parent->i_sb->s_cop;
++ const struct fscrypt_info *parent_ci, *child_ci;
++ struct fscrypt_context parent_ctx, child_ctx;
+ int res;
+
+- if ((parent == NULL) || (child == NULL)) {
+- printk(KERN_ERR "parent %p child %p\n", parent, child);
+- BUG_ON(1);
+- }
+-
+ /* No restrictions on file types which are never encrypted */
+ if (!S_ISREG(child->i_mode) && !S_ISDIR(child->i_mode) &&
+ !S_ISLNK(child->i_mode))
+ return 1;
+
+- /* no restrictions if the parent directory is not encrypted */
+- if (!parent->i_sb->s_cop->is_encrypted(parent))
++ /* No restrictions if the parent directory is unencrypted */
++ if (!cops->is_encrypted(parent))
+ return 1;
+- /* if the child directory is not encrypted, this is always a problem */
+- if (!parent->i_sb->s_cop->is_encrypted(child))
++
++ /* Encrypted directories must not contain unencrypted files */
++ if (!cops->is_encrypted(child))
+ return 0;
++
++ /*
++ * Both parent and child are encrypted, so verify they use the same
++ * encryption policy. Compare the fscrypt_info structs if the keys are
++ * available, otherwise retrieve and compare the fscrypt_contexts.
++ *
++ * Note that the fscrypt_context retrieval will be required frequently
++ * when accessing an encrypted directory tree without the key.
++ * Performance-wise this is not a big deal because we already don't
++ * really optimize for file access without the key (to the extent that
++ * such access is even possible), given that any attempted access
++ * already causes a fscrypt_context retrieval and keyring search.
++ *
++ * In any case, if an unexpected error occurs, fall back to "forbidden".
++ */
++
+ res = fscrypt_get_encryption_info(parent);
+ if (res)
+ return 0;
+@@ -198,17 +232,32 @@ int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
+ return 0;
+ parent_ci = parent->i_crypt_info;
+ child_ci = child->i_crypt_info;
+- if (!parent_ci && !child_ci)
+- return 1;
+- if (!parent_ci || !child_ci)
++
++ if (parent_ci && child_ci) {
++ return memcmp(parent_ci->ci_master_key, child_ci->ci_master_key,
++ FS_KEY_DESCRIPTOR_SIZE) == 0 &&
++ (parent_ci->ci_data_mode == child_ci->ci_data_mode) &&
++ (parent_ci->ci_filename_mode ==
++ child_ci->ci_filename_mode) &&
++ (parent_ci->ci_flags == child_ci->ci_flags);
++ }
++
++ res = cops->get_context(parent, &parent_ctx, sizeof(parent_ctx));
++ if (res != sizeof(parent_ctx))
+ return 0;
+
+- return (memcmp(parent_ci->ci_master_key,
+- child_ci->ci_master_key,
+- FS_KEY_DESCRIPTOR_SIZE) == 0 &&
+- (parent_ci->ci_data_mode == child_ci->ci_data_mode) &&
+- (parent_ci->ci_filename_mode == child_ci->ci_filename_mode) &&
+- (parent_ci->ci_flags == child_ci->ci_flags));
++ res = cops->get_context(child, &child_ctx, sizeof(child_ctx));
++ if (res != sizeof(child_ctx))
++ return 0;
++
++ return memcmp(parent_ctx.master_key_descriptor,
++ child_ctx.master_key_descriptor,
++ FS_KEY_DESCRIPTOR_SIZE) == 0 &&
++ (parent_ctx.contents_encryption_mode ==
++ child_ctx.contents_encryption_mode) &&
++ (parent_ctx.filenames_encryption_mode ==
++ child_ctx.filenames_encryption_mode) &&
++ (parent_ctx.flags == child_ctx.flags);
+ }
+ EXPORT_SYMBOL(fscrypt_has_permitted_context);
+
+diff --git a/fs/dax.c b/fs/dax.c
+index a39b404b646a..917c66584810 100644
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -503,35 +503,6 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
+ }
+
+ /*
+- * Invalidate exceptional DAX entry if easily possible. This handles DAX
+- * entries for invalidate_inode_pages() so we evict the entry only if we can
+- * do so without blocking.
+- */
+-int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index)
+-{
+- int ret = 0;
+- void *entry, **slot;
+- struct radix_tree_root *page_tree = &mapping->page_tree;
+-
+- spin_lock_irq(&mapping->tree_lock);
+- entry = __radix_tree_lookup(page_tree, index, NULL, &slot);
+- if (!entry || !radix_tree_exceptional_entry(entry) ||
+- slot_locked(mapping, slot))
+- goto out;
+- if (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
+- radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
+- goto out;
+- radix_tree_delete(page_tree, index);
+- mapping->nrexceptional--;
+- ret = 1;
+-out:
+- spin_unlock_irq(&mapping->tree_lock);
+- if (ret)
+- dax_wake_mapping_entry_waiter(mapping, index, entry, true);
+- return ret;
+-}
+-
+-/*
+ * Invalidate exceptional DAX entry if it is clean.
+ */
+ int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
+@@ -1029,7 +1000,7 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
+ * into page tables. We have to tear down these mappings so that data
+ * written by write(2) is visible in mmap.
+ */
+- if ((iomap->flags & IOMAP_F_NEW) && inode->i_mapping->nrpages) {
++ if (iomap->flags & IOMAP_F_NEW) {
+ invalidate_inode_pages2_range(inode->i_mapping,
+ pos >> PAGE_SHIFT,
+ (end - 1) >> PAGE_SHIFT);
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 750b3f1eba31..a5f0fef53a81 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -5810,6 +5810,11 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+ file_update_time(vma->vm_file);
+
+ down_read(&EXT4_I(inode)->i_mmap_sem);
++
++ ret = ext4_convert_inline_data(inode);
++ if (ret)
++ goto out_ret;
++
+ /* Delalloc case is easy... */
+ if (test_opt(inode->i_sb, DELALLOC) &&
+ !ext4_should_journal_data(inode) &&
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index af06bda51a54..0cf3794a69a7 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -316,7 +316,6 @@ int update_inode_page(struct inode *inode)
+ } else if (err != -ENOENT) {
+ f2fs_stop_checkpoint(sbi, false);
+ }
+- f2fs_inode_synced(inode);
+ return 0;
+ }
+ ret = update_inode(inode, node_page);
+@@ -446,6 +445,7 @@ void handle_failed_inode(struct inode *inode)
+ * in a panic when flushing dirty inodes in gdirty_list.
+ */
+ update_inode_page(inode);
++ f2fs_inode_synced(inode);
+
+ /* don't make bad inode, since it becomes a regular file. */
+ unlock_new_inode(inode);
+diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
+index 56c19b0610a8..c4aca1900941 100644
+--- a/fs/f2fs/namei.c
++++ b/fs/f2fs/namei.c
+@@ -148,8 +148,6 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+ inode->i_mapping->a_ops = &f2fs_dblock_aops;
+ ino = inode->i_ino;
+
+- f2fs_balance_fs(sbi, true);
+-
+ f2fs_lock_op(sbi);
+ err = f2fs_add_link(dentry, inode);
+ if (err)
+@@ -163,6 +161,8 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+
+ if (IS_DIRSYNC(dir))
+ f2fs_sync_fs(sbi->sb, 1);
++
++ f2fs_balance_fs(sbi, true);
+ return 0;
+ out:
+ handle_failed_inode(inode);
+@@ -423,8 +423,6 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
+ inode_nohighmem(inode);
+ inode->i_mapping->a_ops = &f2fs_dblock_aops;
+
+- f2fs_balance_fs(sbi, true);
+-
+ f2fs_lock_op(sbi);
+ err = f2fs_add_link(dentry, inode);
+ if (err)
+@@ -487,6 +485,8 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
+ }
+
+ kfree(sd);
++
++ f2fs_balance_fs(sbi, true);
+ return err;
+ out:
+ handle_failed_inode(inode);
+@@ -508,8 +508,6 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+ inode->i_mapping->a_ops = &f2fs_dblock_aops;
+ mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_HIGH_ZERO);
+
+- f2fs_balance_fs(sbi, true);
+-
+ set_inode_flag(inode, FI_INC_LINK);
+ f2fs_lock_op(sbi);
+ err = f2fs_add_link(dentry, inode);
+@@ -524,6 +522,8 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+
+ if (IS_DIRSYNC(dir))
+ f2fs_sync_fs(sbi->sb, 1);
++
++ f2fs_balance_fs(sbi, true);
+ return 0;
+
+ out_fail:
+@@ -554,8 +554,6 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
+ init_special_inode(inode, inode->i_mode, rdev);
+ inode->i_op = &f2fs_special_inode_operations;
+
+- f2fs_balance_fs(sbi, true);
+-
+ f2fs_lock_op(sbi);
+ err = f2fs_add_link(dentry, inode);
+ if (err)
+@@ -569,6 +567,8 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
+
+ if (IS_DIRSYNC(dir))
+ f2fs_sync_fs(sbi->sb, 1);
++
++ f2fs_balance_fs(sbi, true);
+ return 0;
+ out:
+ handle_failed_inode(inode);
+@@ -595,8 +595,6 @@ static int __f2fs_tmpfile(struct inode *dir, struct dentry *dentry,
+ inode->i_mapping->a_ops = &f2fs_dblock_aops;
+ }
+
+- f2fs_balance_fs(sbi, true);
+-
+ f2fs_lock_op(sbi);
+ err = acquire_orphan_inode(sbi);
+ if (err)
+@@ -622,6 +620,8 @@ static int __f2fs_tmpfile(struct inode *dir, struct dentry *dentry,
+ /* link_count was changed by d_tmpfile as well. */
+ f2fs_unlock_op(sbi);
+ unlock_new_inode(inode);
++
++ f2fs_balance_fs(sbi, true);
+ return 0;
+
+ release_out:
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index bdc3afad4a8c..6b3531167e6d 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -1348,7 +1348,7 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
+ jbd2_superblock_csum_set(journal, sb);
+ get_bh(bh);
+ bh->b_end_io = end_buffer_write_sync;
+- ret = submit_bh(REQ_OP_WRITE, write_flags, bh);
++ ret = submit_bh(REQ_OP_WRITE, write_flags | REQ_SYNC, bh);
+ wait_on_buffer(bh);
+ if (buffer_write_io_error(bh)) {
+ clear_buffer_write_io_error(bh);
+diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
+index 551bc74ed2b8..fdf520f43f2e 100644
+--- a/fs/orangefs/inode.c
++++ b/fs/orangefs/inode.c
+@@ -224,8 +224,7 @@ int orangefs_setattr(struct dentry *dentry, struct iattr *iattr)
+ if (ret)
+ goto out;
+
+- if ((iattr->ia_valid & ATTR_SIZE) &&
+- iattr->ia_size != i_size_read(inode)) {
++ if (iattr->ia_valid & ATTR_SIZE) {
+ ret = orangefs_setattr_size(inode, iattr);
+ if (ret)
+ goto out;
+diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
+index a290ff6ec756..7c315938e9c2 100644
+--- a/fs/orangefs/namei.c
++++ b/fs/orangefs/namei.c
+@@ -193,8 +193,6 @@ static struct dentry *orangefs_lookup(struct inode *dir, struct dentry *dentry,
+ goto out;
+ }
+
+- ORANGEFS_I(inode)->getattr_time = jiffies - 1;
+-
+ gossip_debug(GOSSIP_NAME_DEBUG,
+ "%s:%s:%d "
+ "Found good inode [%lu] with count [%d]\n",
+diff --git a/fs/orangefs/xattr.c b/fs/orangefs/xattr.c
+index 74a81b1daaac..237c9c04dc3b 100644
+--- a/fs/orangefs/xattr.c
++++ b/fs/orangefs/xattr.c
+@@ -76,11 +76,8 @@ ssize_t orangefs_inode_getxattr(struct inode *inode, const char *name,
+ if (S_ISLNK(inode->i_mode))
+ return -EOPNOTSUPP;
+
+- if (strlen(name) >= ORANGEFS_MAX_XATTR_NAMELEN) {
+- gossip_err("Invalid key length (%d)\n",
+- (int)strlen(name));
++ if (strlen(name) > ORANGEFS_MAX_XATTR_NAMELEN)
+ return -EINVAL;
+- }
+
+ fsuid = from_kuid(&init_user_ns, current_fsuid());
+ fsgid = from_kgid(&init_user_ns, current_fsgid());
+@@ -172,6 +169,9 @@ static int orangefs_inode_removexattr(struct inode *inode, const char *name,
+ struct orangefs_kernel_op_s *new_op = NULL;
+ int ret = -ENOMEM;
+
++ if (strlen(name) > ORANGEFS_MAX_XATTR_NAMELEN)
++ return -EINVAL;
++
+ down_write(&orangefs_inode->xattr_sem);
+ new_op = op_alloc(ORANGEFS_VFS_OP_REMOVEXATTR);
+ if (!new_op)
+@@ -231,23 +231,13 @@ int orangefs_inode_setxattr(struct inode *inode, const char *name,
+ "%s: name %s, buffer_size %zd\n",
+ __func__, name, size);
+
+- if (size >= ORANGEFS_MAX_XATTR_VALUELEN ||
+- flags < 0) {
+- gossip_err("orangefs_inode_setxattr: bogus values of size(%d), flags(%d)\n",
+- (int)size,
+- flags);
++ if (size > ORANGEFS_MAX_XATTR_VALUELEN)
++ return -EINVAL;
++ if (strlen(name) > ORANGEFS_MAX_XATTR_NAMELEN)
+ return -EINVAL;
+- }
+
+ internal_flag = convert_to_internal_xattr_flags(flags);
+
+- if (strlen(name) >= ORANGEFS_MAX_XATTR_NAMELEN) {
+- gossip_err
+- ("orangefs_inode_setxattr: bogus key size (%d)\n",
+- (int)(strlen(name)));
+- return -EINVAL;
+- }
+-
+ /* This is equivalent to a removexattr */
+ if (size == 0 && value == NULL) {
+ gossip_debug(GOSSIP_XATTR_DEBUG,
+@@ -358,7 +348,7 @@ ssize_t orangefs_listxattr(struct dentry *dentry, char *buffer, size_t size)
+
+ returned_count = new_op->downcall.resp.listxattr.returned_count;
+ if (returned_count < 0 ||
+- returned_count >= ORANGEFS_MAX_XATTR_LISTLEN) {
++ returned_count > ORANGEFS_MAX_XATTR_LISTLEN) {
+ gossip_err("%s: impossible value for returned_count:%d:\n",
+ __func__,
+ returned_count);
+diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
+index 16e06dd89457..2e1f16d22c16 100644
+--- a/fs/overlayfs/dir.c
++++ b/fs/overlayfs/dir.c
+@@ -209,7 +209,7 @@ static int ovl_create_upper(struct dentry *dentry, struct inode *inode,
+ if (err)
+ goto out_dput;
+
+- if (ovl_type_merge(dentry->d_parent)) {
++ if (ovl_type_merge(dentry->d_parent) && d_is_dir(newdentry)) {
+ /* Setting opaque here is just an optimization, allow to fail */
+ ovl_set_opaque(dentry, newdentry);
+ }
+diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
+index 729677e18e36..8e6ae68abf8d 100644
+--- a/fs/pstore/platform.c
++++ b/fs/pstore/platform.c
+@@ -705,6 +705,7 @@ int pstore_register(struct pstore_info *psi)
+ if (psi->flags & PSTORE_FLAGS_PMSG)
+ pstore_register_pmsg();
+
++ /* Start watching for new records, if desired. */
+ if (pstore_update_ms >= 0) {
+ pstore_timer.expires = jiffies +
+ msecs_to_jiffies(pstore_update_ms);
+@@ -727,6 +728,11 @@ EXPORT_SYMBOL_GPL(pstore_register);
+
+ void pstore_unregister(struct pstore_info *psi)
+ {
++ /* Stop timer and make sure all work has finished. */
++ pstore_update_ms = -1;
++ del_timer_sync(&pstore_timer);
++ flush_work(&pstore_work);
++
+ if (psi->flags & PSTORE_FLAGS_PMSG)
+ pstore_unregister_pmsg();
+ if (psi->flags & PSTORE_FLAGS_FTRACE)
+@@ -826,7 +832,9 @@ static void pstore_timefunc(unsigned long dummy)
+ schedule_work(&pstore_work);
+ }
+
+- mod_timer(&pstore_timer, jiffies + msecs_to_jiffies(pstore_update_ms));
++ if (pstore_update_ms >= 0)
++ mod_timer(&pstore_timer,
++ jiffies + msecs_to_jiffies(pstore_update_ms));
+ }
+
+ module_param(backend, charp, 0444);
+diff --git a/fs/xattr.c b/fs/xattr.c
+index 7e3317cf4045..94f49a082dd2 100644
+--- a/fs/xattr.c
++++ b/fs/xattr.c
+@@ -530,7 +530,7 @@ getxattr(struct dentry *d, const char __user *name, void __user *value,
+ size = XATTR_SIZE_MAX;
+ kvalue = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+ if (!kvalue) {
+- kvalue = vmalloc(size);
++ kvalue = vzalloc(size);
+ if (!kvalue)
+ return -ENOMEM;
+ }
+diff --git a/include/linux/dax.h b/include/linux/dax.h
+index 24ad71173995..f6cc5ef33e35 100644
+--- a/include/linux/dax.h
++++ b/include/linux/dax.h
+@@ -41,7 +41,6 @@ ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
+ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
+ struct iomap_ops *ops);
+ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
+-int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index);
+ int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
+ pgoff_t index);
+ void dax_wake_mapping_entry_waiter(struct address_space *mapping,
+diff --git a/kernel/padata.c b/kernel/padata.c
+index 3202aa17492c..f1aef1639204 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -354,7 +354,7 @@ static int padata_setup_cpumasks(struct parallel_data *pd,
+
+ cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
+ if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
+- free_cpumask_var(pd->cpumask.cbcpu);
++ free_cpumask_var(pd->cpumask.pcpu);
+ return -ENOMEM;
+ }
+
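
The padata fix has a classic shape: on failure to allocate the second
cpumask, the error path freed the pointer that had just failed (cbcpu) and
leaked the one that succeeded (pcpu). A minimal demo of the correct unwind;
setup() and the buffer sizes are invented:

    #include <stdlib.h>

    static int setup(void)
    {
        char *pcpu = malloc(64);
        char *cbcpu;

        if (!pcpu)
            return -1;
        cbcpu = malloc(64);
        if (!cbcpu) {
            free(pcpu);  /* free what was allocated, not what failed */
            return -1;
        }
        free(cbcpu);
        free(pcpu);
        return 0;
    }

    int main(void)
    {
        return setup();
    }
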
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 1aec370bf9e9..ab8b3ce14dc8 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -3144,6 +3144,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
+ enum compact_priority prio, enum compact_result *compact_result)
+ {
+ struct page *page;
++ unsigned int noreclaim_flag = current->flags & PF_MEMALLOC;
+
+ if (!order)
+ return NULL;
+@@ -3151,7 +3152,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
+ current->flags |= PF_MEMALLOC;
+ *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
+ prio);
+- current->flags &= ~PF_MEMALLOC;
++ current->flags = (current->flags & ~PF_MEMALLOC) | noreclaim_flag;
+
+ if (*compact_result <= COMPACT_INACTIVE)
+ return NULL;
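
Rather than clearing PF_MEMALLOC unconditionally on the way out,
__alloc_pages_direct_compact now snapshots whether the flag was already set
and restores that state, so a caller that entered with it set keeps it.
The save/restore shape, reduced to a plain bitmask with illustrative names:

    #include <stdio.h>

    #define PF_MEMALLOC_D 0x1000  /* illustrative flag value */

    static unsigned int flags = PF_MEMALLOC_D;  /* caller already set it */

    static void compact(void)
    {
        unsigned int noreclaim = flags & PF_MEMALLOC_D;

        flags |= PF_MEMALLOC_D;
        /* ... work that must run with the flag set ... */

        /* restore the caller's state; the old code simply cleared it */
        flags = (flags & ~PF_MEMALLOC_D) | noreclaim;
    }

    int main(void)
    {
        compact();
        printf("still set: %d\n", !!(flags & PF_MEMALLOC_D));
        return 0;
    }
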
+diff --git a/mm/truncate.c b/mm/truncate.c
+index dd7b24e083c5..c2d7deae28bb 100644
+--- a/mm/truncate.c
++++ b/mm/truncate.c
+@@ -66,17 +66,14 @@ static void truncate_exceptional_entry(struct address_space *mapping,
+
+ /*
+ * Invalidate exceptional entry if easily possible. This handles exceptional
+- * entries for invalidate_inode_pages() so for DAX it evicts only unlocked and
+- * clean entries.
++ * entries for invalidate_inode_pages().
+ */
+ static int invalidate_exceptional_entry(struct address_space *mapping,
+ pgoff_t index, void *entry)
+ {
+- /* Handled by shmem itself */
+- if (shmem_mapping(mapping))
++ /* Handled by shmem itself, or for DAX we do nothing. */
++ if (shmem_mapping(mapping) || dax_mapping(mapping))
+ return 1;
+- if (dax_mapping(mapping))
+- return dax_invalidate_mapping_entry(mapping, index);
+ clear_shadow_entry(mapping, index, entry);
+ return 1;
+ }
+@@ -685,6 +682,17 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
+ cond_resched();
+ index++;
+ }
++ /*
++ * For DAX we invalidate page tables after invalidating radix tree. We
++ * could invalidate page tables while invalidating each entry however
++ * that would be expensive. And doing range unmapping before doesn't
++ * work as we have no cheap way to find whether radix tree entry didn't
++ * get remapped later.
++ */
++ if (dax_mapping(mapping)) {
++ unmap_mapping_range(mapping, (loff_t)start << PAGE_SHIFT,
++ (loff_t)(end - start + 1) << PAGE_SHIFT, 0);
++ }
+ cleancache_invalidate_inode(mapping);
+ return ret;
+ }
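Together with the dax.h hunk above, this drops per-entry DAX invalidation from invalidate_inode_pages() and instead has invalidate_inode_pages2_range() unmap the whole byte range once, after the radix tree entries are gone. The unmap call converts an inclusive page-index range into a byte offset and length; a standalone sketch of that arithmetic (4 KiB pages assumed for illustration), where casting to loff_t before shifting avoids overflowing a 32-bit index:

#include <stdio.h>

#define PAGE_SHIFT 12                   /* 4 KiB pages, for illustration */
typedef long long loff_t;

int main(void)
{
    unsigned long start = 0x100000, end = 0x1000ff;   /* inclusive page indexes */

    loff_t offset = (loff_t)start << PAGE_SHIFT;      /* cast first, then shift */
    loff_t length = (loff_t)(end - start + 1) << PAGE_SHIFT;

    /* offset is 4 GiB here: shifting the unsigned long on a 32-bit
     * build without the cast would have wrapped to zero. */
    printf("unmap %lld bytes at offset %lld\n", length, offset);
    return 0;
}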
+diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
+index 48f9471e7c85..c88a6007e643 100644
+--- a/net/bluetooth/hci_sock.c
++++ b/net/bluetooth/hci_sock.c
+@@ -1680,7 +1680,8 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
+ if (msg->msg_flags & MSG_OOB)
+ return -EOPNOTSUPP;
+
+- if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
++ if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
++ MSG_CMSG_COMPAT))
+ return -EINVAL;
+
+ if (len < 4 || len > HCI_MAX_FRAME_SIZE)
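hci_sock_sendmsg() validates msg_flags against a whitelist and rejects everything else with -EINVAL. Compat (32-bit on 64-bit) callers always arrive with the kernel-internal MSG_CMSG_COMPAT bit set, so omitting it from the mask rejected every compat sender; the fix adds it to the allowed set. The shape of that check, reduced to a standalone function with flag values as in the Linux headers:

#include <errno.h>

#define MSG_DONTWAIT    0x40u
#define MSG_ERRQUEUE    0x2000u
#define MSG_NOSIGNAL    0x4000u
#define MSG_CMSG_COMPAT 0x80000000u     /* set internally for compat tasks */

static int check_flags(unsigned int flags)
{
    const unsigned int allowed = MSG_DONTWAIT | MSG_NOSIGNAL |
                                 MSG_ERRQUEUE | MSG_CMSG_COMPAT;

    /* Whitelist check: any bit outside the supported set is an error. */
    return (flags & ~allowed) ? -EINVAL : 0;
}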
+diff --git a/net/core/datagram.c b/net/core/datagram.c
+index f4947e737f34..d797baa69e43 100644
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -760,7 +760,7 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
+
+ if (msg_data_left(msg) < chunk) {
+ if (__skb_checksum_complete(skb))
+- goto csum_error;
++ return -EINVAL;
+ if (skb_copy_datagram_msg(skb, hlen, msg, chunk))
+ goto fault;
+ } else {
+@@ -768,15 +768,16 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
+ if (skb_copy_and_csum_datagram(skb, hlen, &msg->msg_iter,
+ chunk, &csum))
+ goto fault;
+- if (csum_fold(csum))
+- goto csum_error;
++
++ if (csum_fold(csum)) {
++ iov_iter_revert(&msg->msg_iter, chunk);
++ return -EINVAL;
++ }
++
+ if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
+ netdev_rx_csum_fault(skb->dev);
+ }
+ return 0;
+-csum_error:
+- iov_iter_revert(&msg->msg_iter, chunk);
+- return -EINVAL;
+ fault:
+ return -EFAULT;
+ }
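The datagram fix restructures the error handling so that iov_iter_revert() runs only on the branch that actually advanced the iterator. The old shared csum_error label also reverted on the short-read path, where nothing had been copied yet, leaving the iterator pointing before its true position. The invariant in a reduced sketch, with a plain cursor standing in for iov_iter:

#include <stddef.h>

struct cursor { const char *p; size_t left; };

/* Copy-and-verify: advance the cursor, then check; on a bad check,
 * undo exactly the advance we made (iov_iter_revert() analogue). */
static int consume_checked(struct cursor *c, size_t n,
                           int (*ok)(const char *, size_t))
{
    if (c->left < n)
        return -1;          /* nothing consumed: no revert on this path */

    c->p += n;              /* the "copy" advances the cursor */
    c->left -= n;
    if (!ok(c->p - n, n)) { /* verify (csum_fold() analogue) */
        c->p -= n;          /* revert only here, where we advanced */
        c->left += n;
        return -1;
    }
    return 0;
}

static int always_ok(const char *buf, size_t n) { (void)buf; (void)n; return 1; }

int main(void)
{
    char data[8] = "payload";
    struct cursor c = { data, sizeof(data) };
    return consume_checked(&c, 4, always_ok) ? 1 : 0;
}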
+diff --git a/tools/perf/arch/s390/annotate/instructions.c b/tools/perf/arch/s390/annotate/instructions.c
+new file mode 100644
+index 000000000000..745b4b1b8b21
+--- /dev/null
++++ b/tools/perf/arch/s390/annotate/instructions.c
+@@ -0,0 +1,30 @@
++static struct ins_ops *s390__associate_ins_ops(struct arch *arch, const char *name)
++{
++ struct ins_ops *ops = NULL;
++
++ /* catch all kind of jumps */
++ if (strchr(name, 'j') ||
++ !strncmp(name, "bct", 3) ||
++ !strncmp(name, "br", 2))
++ ops = &jump_ops;
++ /* override call/returns */
++ if (!strcmp(name, "bras") ||
++ !strcmp(name, "brasl") ||
++ !strcmp(name, "basr"))
++ ops = &call_ops;
++ if (!strcmp(name, "br"))
++ ops = &ret_ops;
++
++ arch__associate_ins_ops(arch, name, ops);
++ return ops;
++}
++
++static int s390__annotate_init(struct arch *arch)
++{
++ if (!arch->initialized) {
++ arch->initialized = true;
++ arch->associate_instruction_ops = s390__associate_ins_ops;
++ }
++
++ return 0;
++}
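The new s390 file classifies instructions purely by mnemonic: anything containing a 'j' or starting with "bct"/"br" counts as a jump, bras/brasl/basr are then overridden to calls, and a bare br becomes a return; the override order matters, since "br" matches the jump test first. The same decision chain as a standalone check:

#include <stdio.h>
#include <string.h>

static const char *classify(const char *name)
{
    const char *kind = "other";

    /* catch all kinds of jumps */
    if (strchr(name, 'j') || !strncmp(name, "bct", 3) || !strncmp(name, "br", 2))
        kind = "jump";
    /* override call/returns */
    if (!strcmp(name, "bras") || !strcmp(name, "brasl") || !strcmp(name, "basr"))
        kind = "call";
    if (!strcmp(name, "br"))
        kind = "ret";
    return kind;
}

int main(void)
{
    /* prints: jne=jump brasl=call br=ret */
    printf("jne=%s brasl=%s br=%s\n",
           classify("jne"), classify("brasl"), classify("br"));
    return 0;
}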
+diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
+index cea3e7958cde..bd58651d6040 100644
+--- a/tools/perf/util/annotate.c
++++ b/tools/perf/util/annotate.c
+@@ -108,6 +108,7 @@ static int arch__associate_ins_ops(struct arch* arch, const char *name, struct i
+ #include "arch/arm64/annotate/instructions.c"
+ #include "arch/x86/annotate/instructions.c"
+ #include "arch/powerpc/annotate/instructions.c"
++#include "arch/s390/annotate/instructions.c"
+
+ static struct arch architectures[] = {
+ {
+@@ -132,6 +133,13 @@ static struct arch architectures[] = {
+ },
+ {
+ .name = "s390",
++ .init = s390__annotate_init,
++ .objdump = {
++ .comment_char = '#',
++ },
++ },
++ {
++ .name = "s390",
+ .objdump = {
+ .comment_char = '#',
+ },
+diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
+index c5a6e0b12452..78bd632f144d 100644
+--- a/tools/perf/util/auxtrace.c
++++ b/tools/perf/util/auxtrace.c
+@@ -1826,7 +1826,7 @@ static int addr_filter__resolve_kernel_syms(struct addr_filter *filt)
+ filt->addr = start;
+ if (filt->range && !filt->size && !filt->sym_to) {
+ filt->size = size;
+- no_size = !!size;
++ no_size = !size;
+ }
+ }
+
+@@ -1840,7 +1840,7 @@ static int addr_filter__resolve_kernel_syms(struct addr_filter *filt)
+ if (err)
+ return err;
+ filt->size = start + size - filt->addr;
+- no_size = !!size;
++ no_size = !size;
+ }
+
+ /* The very last symbol in kallsyms does not imply a particular size */
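Both auxtrace hunks fix the same one-character logic inversion: no_size is meant to record that no symbol size was found, but `!!size` evaluates to "size is non-zero", which is exactly backwards. A two-line demonstration:

#include <assert.h>

int main(void)
{
    unsigned long size = 0;     /* symbol size not known */

    assert(!size  == 1);        /* fixed code: flags "no size" */
    assert(!!size == 0);        /* old code: never flagged it */
    return 0;
}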
+diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c
+index 4af47079cf04..e717fed80219 100644
+--- a/tools/testing/selftests/x86/ldt_gdt.c
++++ b/tools/testing/selftests/x86/ldt_gdt.c
+@@ -403,6 +403,51 @@ static void *threadproc(void *ctx)
+ }
+ }
+
++#ifdef __i386__
++
++#ifndef SA_RESTORER
++#define SA_RESTORER 0x04000000
++#endif
++
++/*
++ * The UAPI header calls this 'struct sigaction', which conflicts with
++ * glibc. Sigh.
++ */
++struct fake_ksigaction {
++ void *handler; /* the real type is nasty */
++ unsigned long sa_flags;
++ void (*sa_restorer)(void);
++ unsigned char sigset[8];
++};
++
++static void fix_sa_restorer(int sig)
++{
++ struct fake_ksigaction ksa;
++
++ if (syscall(SYS_rt_sigaction, sig, NULL, &ksa, 8) == 0) {
++ /*
++ * glibc has a nasty bug: it sometimes writes garbage to
++ * sa_restorer. This interacts quite badly with anything
++ * that fiddles with SS because it can trigger legacy
++ * stack switching. Patch it up. See:
++ *
++ * https://sourceware.org/bugzilla/show_bug.cgi?id=21269
++ */
++ if (!(ksa.sa_flags & SA_RESTORER) && ksa.sa_restorer) {
++ ksa.sa_restorer = NULL;
++ if (syscall(SYS_rt_sigaction, sig, &ksa, NULL,
++ sizeof(ksa.sigset)) != 0)
++ err(1, "rt_sigaction");
++ }
++ }
++}
++#else
++static void fix_sa_restorer(int sig)
++{
++ /* 64-bit glibc works fine. */
++}
++#endif
++
+ static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
+ int flags)
+ {
+@@ -414,6 +459,7 @@ static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
+ if (sigaction(sig, &sa, 0))
+ err(1, "sigaction");
+
++ fix_sa_restorer(sig);
+ }
+
+ static jmp_buf jmpbuf;