author    Mike Pagano <mpagano@gentoo.org>    2018-02-03 16:22:15 -0500
committer Mike Pagano <mpagano@gentoo.org>    2018-02-03 16:22:15 -0500
commit    e42ba89e229f13dcf6d2c76301b94f4b18b7ddbf (patch)
tree      961b25b801ed4570660929a39ad74b9c7202c4be
parent    linux kernel 4.9.79 (diff)
Linux patch 4.9.80 (4.9-83)
-rw-r--r--  0000_README              |    4
-rw-r--r--  1079_linux-4.9.80.patch  | 2366
2 files changed, 2370 insertions(+), 0 deletions(-)
diff --git a/0000_README b/0000_README
index d0865d54..0abed7cc 100644
--- a/0000_README
+++ b/0000_README
@@ -359,6 +359,10 @@ Patch: 1078_linux-4.9.79.patch
From: http://www.kernel.org
Desc: Linux 4.9.79
+Patch: 1079_linux-4.9.80.patch
+From: http://www.kernel.org
+Desc: Linux 4.9.80
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1079_linux-4.9.80.patch b/1079_linux-4.9.80.patch
new file mode 100644
index 00000000..26370c46
--- /dev/null
+++ b/1079_linux-4.9.80.patch
@@ -0,0 +1,2366 @@
+diff --git a/Makefile b/Makefile
+index 4a7e6dff1c2e..9550b6939076 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 79
++SUBLEVEL = 80
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+
+diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
+index 7c9e0fae9bb9..65e0db1d3bd7 100644
+--- a/arch/arm/boot/dts/bcm-nsp.dtsi
++++ b/arch/arm/boot/dts/bcm-nsp.dtsi
+@@ -85,7 +85,7 @@
+ timer@20200 {
+ compatible = "arm,cortex-a9-global-timer";
+ reg = <0x20200 0x100>;
+- interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&periph_clk>;
+ };
+
+@@ -93,7 +93,7 @@
+ compatible = "arm,cortex-a9-twd-timer";
+ reg = <0x20600 0x20>;
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
+- IRQ_TYPE_LEVEL_HIGH)>;
++ IRQ_TYPE_EDGE_RISING)>;
+ clocks = <&periph_clk>;
+ };
+
+diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
+index aa8b0672f87a..d9ae404f08c9 100644
+--- a/arch/x86/crypto/aesni-intel_glue.c
++++ b/arch/x86/crypto/aesni-intel_glue.c
+@@ -906,7 +906,7 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
+
+ if (sg_is_last(req->src) &&
+ req->src->offset + req->src->length <= PAGE_SIZE &&
+- sg_is_last(req->dst) &&
++ sg_is_last(req->dst) && req->dst->length &&
+ req->dst->offset + req->dst->length <= PAGE_SIZE) {
+ one_entry_in_sg = 1;
+ scatterwalk_start(&src_sg_walk, req->src);
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index cbd1d44da2d3..20cfeeb681c6 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1113,7 +1113,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
+ static inline int emulate_instruction(struct kvm_vcpu *vcpu,
+ int emulation_type)
+ {
+- return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
++ return x86_emulate_instruction(vcpu, 0,
++ emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0);
+ }
+
+ void kvm_enable_efer_bits(u64);
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index c8f8dd8ca0a1..6f5a3b076341 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -4990,6 +4990,8 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
+ bool op_prefix = false;
+ bool has_seg_override = false;
+ struct opcode opcode;
++ u16 dummy;
++ struct desc_struct desc;
+
+ ctxt->memop.type = OP_NONE;
+ ctxt->memopp = NULL;
+@@ -5008,6 +5010,11 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
+ switch (mode) {
+ case X86EMUL_MODE_REAL:
+ case X86EMUL_MODE_VM86:
++ def_op_bytes = def_ad_bytes = 2;
++ ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
++ if (desc.d)
++ def_op_bytes = def_ad_bytes = 4;
++ break;
+ case X86EMUL_MODE_PROT16:
+ def_op_bytes = def_ad_bytes = 2;
+ break;
+diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
+index 6e219e5c07d2..5f810bb80802 100644
+--- a/arch/x86/kvm/ioapic.c
++++ b/arch/x86/kvm/ioapic.c
+@@ -257,8 +257,7 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors)
+ index == RTC_GSI) {
+ if (kvm_apic_match_dest(vcpu, NULL, 0,
+ e->fields.dest_id, e->fields.dest_mode) ||
+- (e->fields.trig_mode == IOAPIC_EDGE_TRIG &&
+- kvm_apic_pending_eoi(vcpu, e->fields.vector)))
++ kvm_apic_pending_eoi(vcpu, e->fields.vector))
+ __set_bit(e->fields.vector,
+ ioapic_handled_vectors);
+ }
+@@ -279,6 +278,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
+ {
+ unsigned index;
+ bool mask_before, mask_after;
++ int old_remote_irr, old_delivery_status;
+ union kvm_ioapic_redirect_entry *e;
+
+ switch (ioapic->ioregsel) {
+@@ -301,14 +301,28 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
+ return;
+ e = &ioapic->redirtbl[index];
+ mask_before = e->fields.mask;
++ /* Preserve read-only fields */
++ old_remote_irr = e->fields.remote_irr;
++ old_delivery_status = e->fields.delivery_status;
+ if (ioapic->ioregsel & 1) {
+ e->bits &= 0xffffffff;
+ e->bits |= (u64) val << 32;
+ } else {
+ e->bits &= ~0xffffffffULL;
+ e->bits |= (u32) val;
+- e->fields.remote_irr = 0;
+ }
++ e->fields.remote_irr = old_remote_irr;
++ e->fields.delivery_status = old_delivery_status;
++
++ /*
++ * Some OSes (Linux, Xen) assume that Remote IRR bit will
++ * be cleared by IOAPIC hardware when the entry is configured
++ * as edge-triggered. This behavior is used to simulate an
++ * explicit EOI on IOAPICs that don't have the EOI register.
++ */
++ if (e->fields.trig_mode == IOAPIC_EDGE_TRIG)
++ e->fields.remote_irr = 0;
++
+ mask_after = e->fields.mask;
+ if (mask_before != mask_after)
+ kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 3ca6d15994e4..178a344f55f8 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -5194,7 +5194,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
+ vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
+ }
+
+- vmcs_writel(GUEST_RFLAGS, 0x02);
++ kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
+ kvm_rip_write(vcpu, 0xfff0);
+
+ vmcs_writel(GUEST_GDTR_BASE, 0);
+@@ -6257,7 +6257,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
+ if (test_bit(KVM_REQ_EVENT, &vcpu->requests))
+ return 1;
+
+- err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE);
++ err = emulate_instruction(vcpu, 0);
+
+ if (err == EMULATE_USER_EXIT) {
+ ++vcpu->stat.mmio_exits;
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index d3f80cccb9aa..e023ef981feb 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1751,10 +1751,13 @@ static u64 __get_kvmclock_ns(struct kvm *kvm)
+ /* both __this_cpu_read() and rdtsc() should be on the same cpu */
+ get_cpu();
+
+- kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
+- &hv_clock.tsc_shift,
+- &hv_clock.tsc_to_system_mul);
+- ret = __pvclock_read_cycles(&hv_clock, rdtsc());
++ if (__this_cpu_read(cpu_tsc_khz)) {
++ kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
++ &hv_clock.tsc_shift,
++ &hv_clock.tsc_to_system_mul);
++ ret = __pvclock_read_cycles(&hv_clock, rdtsc());
++ } else
++ ret = ktime_get_boot_ns() + ka->kvmclock_offset;
+
+ put_cpu();
+
+@@ -5308,7 +5311,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+ vcpu->run->internal.ndata = 0;
+- r = EMULATE_FAIL;
++ r = EMULATE_USER_EXIT;
+ }
+ kvm_queue_exception(vcpu, UD_VECTOR);
+
+diff --git a/crypto/Kconfig b/crypto/Kconfig
+index 84d71482bf08..ab0d93ab5695 100644
+--- a/crypto/Kconfig
++++ b/crypto/Kconfig
+@@ -120,7 +120,7 @@ config CRYPTO_DH
+
+ config CRYPTO_ECDH
+ tristate "ECDH algorithm"
+- select CRYTPO_KPP
++ select CRYPTO_KPP
+ help
+ Generic implementation of the ECDH algorithm
+
+diff --git a/crypto/af_alg.c b/crypto/af_alg.c
+index f5e18c2a4852..ca50eeb13097 100644
+--- a/crypto/af_alg.c
++++ b/crypto/af_alg.c
+@@ -149,7 +149,7 @@ EXPORT_SYMBOL_GPL(af_alg_release_parent);
+
+ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ {
+- const u32 forbidden = CRYPTO_ALG_INTERNAL;
++ const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY;
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct sockaddr_alg *sa = (void *)uaddr;
+@@ -157,6 +157,10 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ void *private;
+ int err;
+
++ /* If caller uses non-allowed flag, return error. */
++ if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
++ return -EINVAL;
++
+ if (sock->state == SS_CONNECTED)
+ return -EINVAL;
+
+@@ -175,9 +179,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ if (IS_ERR(type))
+ return PTR_ERR(type);
+
+- private = type->bind(sa->salg_name,
+- sa->salg_feat & ~forbidden,
+- sa->salg_mask & ~forbidden);
++ private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask);
+ if (IS_ERR(private)) {
+ module_put(type->owner);
+ return PTR_ERR(private);
+diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
+index 7e8ed96236ce..a68be626017c 100644
+--- a/crypto/sha3_generic.c
++++ b/crypto/sha3_generic.c
+@@ -18,6 +18,7 @@
+ #include <linux/types.h>
+ #include <crypto/sha3.h>
+ #include <asm/byteorder.h>
++#include <asm/unaligned.h>
+
+ #define KECCAK_ROUNDS 24
+
+@@ -149,7 +150,7 @@ static int sha3_update(struct shash_desc *desc, const u8 *data,
+ unsigned int i;
+
+ for (i = 0; i < sctx->rsizw; i++)
+- sctx->st[i] ^= ((u64 *) src)[i];
++ sctx->st[i] ^= get_unaligned_le64(src + 8 * i);
+ keccakf(sctx->st);
+
+ done += sctx->rsiz;
+@@ -174,7 +175,7 @@ static int sha3_final(struct shash_desc *desc, u8 *out)
+ sctx->buf[sctx->rsiz - 1] |= 0x80;
+
+ for (i = 0; i < sctx->rsizw; i++)
+- sctx->st[i] ^= ((u64 *) sctx->buf)[i];
++ sctx->st[i] ^= get_unaligned_le64(sctx->buf + 8 * i);
+
+ keccakf(sctx->st);
+
+diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
+index 7b2c48fde4e2..201c7ceb7052 100644
+--- a/drivers/acpi/device_sysfs.c
++++ b/drivers/acpi/device_sysfs.c
+@@ -146,6 +146,10 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias,
+ int count;
+ struct acpi_hardware_id *id;
+
++ /* Avoid unnecessarily loading modules for non present devices. */
++ if (!acpi_device_is_present(acpi_dev))
++ return 0;
++
+ /*
+ * Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should
+ * be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the
+diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
+index 10e1b9eee10e..f03cf1df8d6b 100644
+--- a/drivers/auxdisplay/Kconfig
++++ b/drivers/auxdisplay/Kconfig
+@@ -121,6 +121,7 @@ config CFAG12864B_RATE
+
+ config IMG_ASCII_LCD
+ tristate "Imagination Technologies ASCII LCD Display"
++ depends on HAS_IOMEM
+ default y if MIPS_MALTA || MIPS_SEAD3
+ select SYSCON
+ help
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 24d6cefceb32..402254d26247 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1558,9 +1558,8 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
+ return err;
+ }
+
+-static void lo_release(struct gendisk *disk, fmode_t mode)
++static void __lo_release(struct loop_device *lo)
+ {
+- struct loop_device *lo = disk->private_data;
+ int err;
+
+ if (atomic_dec_return(&lo->lo_refcnt))
+@@ -1586,6 +1585,13 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
+ mutex_unlock(&lo->lo_ctl_mutex);
+ }
+
++static void lo_release(struct gendisk *disk, fmode_t mode)
++{
++ mutex_lock(&loop_index_mutex);
++ __lo_release(disk->private_data);
++ mutex_unlock(&loop_index_mutex);
++}
++
+ static const struct block_device_operations lo_fops = {
+ .owner = THIS_MODULE,
+ .open = lo_open,
+diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
+index d8b164a7c4e5..cac26fb22891 100644
+--- a/drivers/cpufreq/Kconfig
++++ b/drivers/cpufreq/Kconfig
+@@ -273,6 +273,7 @@ endif
+ if MIPS
+ config LOONGSON2_CPUFREQ
+ tristate "Loongson2 CPUFreq Driver"
++ depends on LEMOTE_MACH2F
+ help
+ This option adds a CPUFreq driver for loongson processors which
+ support software configurable cpu frequency.
+@@ -285,6 +286,7 @@ config LOONGSON2_CPUFREQ
+
+ config LOONGSON1_CPUFREQ
+ tristate "Loongson1 CPUFreq Driver"
++ depends on LOONGSON1_LS1B
+ help
+ This option adds a CPUFreq driver for loongson1 processors which
+ support software configurable cpu frequency.
+diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
+index dc37dbe4b46d..a83e97e15c14 100644
+--- a/drivers/gpio/gpio-ath79.c
++++ b/drivers/gpio/gpio-ath79.c
+@@ -323,3 +323,6 @@ static struct platform_driver ath79_gpio_driver = {
+ };
+
+ module_platform_driver(ath79_gpio_driver);
++
++MODULE_DESCRIPTION("Atheros AR71XX/AR724X/AR913X GPIO API support");
++MODULE_LICENSE("GPL v2");
+diff --git a/drivers/gpio/gpio-iop.c b/drivers/gpio/gpio-iop.c
+index 98c7ff2a76e7..8d62db447ec1 100644
+--- a/drivers/gpio/gpio-iop.c
++++ b/drivers/gpio/gpio-iop.c
+@@ -58,3 +58,7 @@ static int __init iop3xx_gpio_init(void)
+ return platform_driver_register(&iop3xx_gpio_driver);
+ }
+ arch_initcall(iop3xx_gpio_init);
++
++MODULE_DESCRIPTION("GPIO handling for Intel IOP3xx processors");
++MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
+index adba614b3965..abb5a2752511 100644
+--- a/drivers/gpio/gpio-stmpe.c
++++ b/drivers/gpio/gpio-stmpe.c
+@@ -190,6 +190,16 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
+ };
+ int i, j;
+
++ /*
++ * STMPE1600: to be able to get IRQ from pins,
++ * a read must be done on GPMR register, or a write in
++ * GPSR or GPCR registers
++ */
++ if (stmpe->partnum == STMPE1600) {
++ stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_LSB]);
++ stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_CSB]);
++ }
++
+ for (i = 0; i < CACHE_NR_REGS; i++) {
+ /* STMPE801 and STMPE1600 don't have RE and FE registers */
+ if ((stmpe->partnum == STMPE801 ||
+@@ -227,21 +237,11 @@ static void stmpe_gpio_irq_unmask(struct irq_data *d)
+ {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(gc);
+- struct stmpe *stmpe = stmpe_gpio->stmpe;
+ int offset = d->hwirq;
+ int regoffset = offset / 8;
+ int mask = BIT(offset % 8);
+
+ stmpe_gpio->regs[REG_IE][regoffset] |= mask;
+-
+- /*
+- * STMPE1600 workaround: to be able to get IRQ from pins,
+- * a read must be done on GPMR register, or a write in
+- * GPSR or GPCR registers
+- */
+- if (stmpe->partnum == STMPE1600)
+- stmpe_reg_read(stmpe,
+- stmpe->regs[STMPE_IDX_GPMR_LSB + regoffset]);
+ }
+
+ static void stmpe_dbg_show_one(struct seq_file *s,
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 063d176baa24..f3c3680963b9 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -705,6 +705,9 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
+ struct gpioevent_data ge;
+ int ret, level;
+
++ /* Do not leak kernel stack to userspace */
++ memset(&ge, 0, sizeof(ge));
++
+ ge.timestamp = ktime_get_real_ns();
+ level = gpiod_get_value_cansleep(le->desc);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+index 1a0a5f7cccbc..47951f4775b9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+@@ -367,29 +367,50 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
+ {
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct cik_sdma_rlc_registers *m;
++ unsigned long end_jiffies;
+ uint32_t sdma_base_addr;
++ uint32_t data;
+
+ m = get_sdma_mqd(mqd);
+ sdma_base_addr = get_sdma_base_addr(m);
+
+- WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
+- m->sdma_rlc_virtual_addr);
++ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
++ m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
+
+- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE,
+- m->sdma_rlc_rb_base);
++ end_jiffies = msecs_to_jiffies(2000) + jiffies;
++ while (true) {
++ data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
++ if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
++ break;
++ if (time_after(jiffies, end_jiffies))
++ return -ETIME;
++ usleep_range(500, 1000);
++ }
++ if (m->sdma_engine_id) {
++ data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
++ data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
++ RESUME_CTX, 0);
++ WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
++ } else {
++ data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
++ data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
++ RESUME_CTX, 0);
++ WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
++ }
+
++ WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
++ m->sdma_rlc_doorbell);
++ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
++ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
++ WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
++ m->sdma_rlc_virtual_addr);
++ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+ m->sdma_rlc_rb_base_hi);
+-
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ m->sdma_rlc_rb_rptr_addr_lo);
+-
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ m->sdma_rlc_rb_rptr_addr_hi);
+-
+- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
+- m->sdma_rlc_doorbell);
+-
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ m->sdma_rlc_rb_cntl);
+
+@@ -493,9 +514,9 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+ }
+
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
+- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
+- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
+- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
++ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
++ RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
++ SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index f26d1fd53bef..cb505f66d3aa 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -416,6 +416,10 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
+ if (candidate == lobj)
+ break;
+
++ /* We can't move pinned BOs here */
++ if (bo->pin_count)
++ continue;
++
+ other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+
+ /* Check if this BO is in one of the domains we need space for */
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+index d83de985e88c..8577a563600f 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+@@ -215,8 +215,8 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
+ BUG_ON(!mm || !mqd || !q);
+
+ m = get_sdma_mqd(mqd);
+- m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) <<
+- SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
++ m->sdma_rlc_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
++ << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
+ q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
+ 1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
+ 6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+index e1fb40b84c72..5425c68d0287 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+@@ -205,6 +205,24 @@ int pqm_create_queue(struct process_queue_manager *pqm,
+
+ switch (type) {
+ case KFD_QUEUE_TYPE_SDMA:
++ if (dev->dqm->queue_count >=
++ CIK_SDMA_QUEUES_PER_ENGINE * CIK_SDMA_ENGINE_NUM) {
++ pr_err("Over-subscription is not allowed for SDMA.\n");
++ retval = -EPERM;
++ goto err_create_queue;
++ }
++
++ retval = create_cp_queue(pqm, dev, &q, properties, f, *qid);
++ if (retval != 0)
++ goto err_create_queue;
++ pqn->q = q;
++ pqn->kq = NULL;
++ retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
++ &q->properties.vmid);
++ pr_debug("DQM returned %d for create_queue\n", retval);
++ print_queue(q);
++ break;
++
+ case KFD_QUEUE_TYPE_COMPUTE:
+ /* check if there is over subscription */
+ if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
+diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
+index 44d476ea6d2e..f64f35cdc2ff 100644
+--- a/drivers/gpu/drm/bridge/tc358767.c
++++ b/drivers/gpu/drm/bridge/tc358767.c
+@@ -97,7 +97,7 @@
+ #define DP0_ACTIVEVAL 0x0650
+ #define DP0_SYNCVAL 0x0654
+ #define DP0_MISC 0x0658
+-#define TU_SIZE_RECOMMENDED (0x3f << 16) /* LSCLK cycles per TU */
++#define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */
+ #define BPC_6 (0 << 5)
+ #define BPC_8 (1 << 5)
+
+@@ -318,7 +318,7 @@ static ssize_t tc_aux_transfer(struct drm_dp_aux *aux,
+ tmp = (tmp << 8) | buf[i];
+ i++;
+ if (((i % 4) == 0) || (i == size)) {
+- tc_write(DP0_AUXWDATA(i >> 2), tmp);
++ tc_write(DP0_AUXWDATA((i - 1) >> 2), tmp);
+ tmp = 0;
+ }
+ }
+@@ -603,8 +603,15 @@ static int tc_get_display_props(struct tc_data *tc)
+ ret = drm_dp_link_probe(&tc->aux, &tc->link.base);
+ if (ret < 0)
+ goto err_dpcd_read;
+- if ((tc->link.base.rate != 162000) && (tc->link.base.rate != 270000))
+- goto err_dpcd_inval;
++ if (tc->link.base.rate != 162000 && tc->link.base.rate != 270000) {
++ dev_dbg(tc->dev, "Falling to 2.7 Gbps rate\n");
++ tc->link.base.rate = 270000;
++ }
++
++ if (tc->link.base.num_lanes > 2) {
++ dev_dbg(tc->dev, "Falling to 2 lanes\n");
++ tc->link.base.num_lanes = 2;
++ }
+
+ ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, tmp);
+ if (ret < 0)
+@@ -637,9 +644,6 @@ static int tc_get_display_props(struct tc_data *tc)
+ err_dpcd_read:
+ dev_err(tc->dev, "failed to read DPCD: %d\n", ret);
+ return ret;
+-err_dpcd_inval:
+- dev_err(tc->dev, "invalid DPCD\n");
+- return -EINVAL;
+ }
+
+ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
+@@ -655,6 +659,14 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
+ int lower_margin = mode->vsync_start - mode->vdisplay;
+ int vsync_len = mode->vsync_end - mode->vsync_start;
+
++ /*
++ * Recommended maximum number of symbols transferred in a transfer unit:
++ * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size,
++ * (output active video bandwidth in bytes))
++ * Must be less than tu_size.
++ */
++ max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
++
+ dev_dbg(tc->dev, "set mode %dx%d\n",
+ mode->hdisplay, mode->vdisplay);
+ dev_dbg(tc->dev, "H margin %d,%d sync %d\n",
+@@ -664,13 +676,18 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
+ dev_dbg(tc->dev, "total: %dx%d\n", mode->htotal, mode->vtotal);
+
+
+- /* LCD Ctl Frame Size */
+- tc_write(VPCTRL0, (0x40 << 20) /* VSDELAY */ |
++ /*
++ * LCD Ctl Frame Size
++ * datasheet is not clear of vsdelay in case of DPI
++ * assume we do not need any delay when DPI is a source of
++ * sync signals
++ */
++ tc_write(VPCTRL0, (0 << 20) /* VSDELAY */ |
+ OPXLFMT_RGB888 | FRMSYNC_DISABLED | MSF_DISABLED);
+- tc_write(HTIM01, (left_margin << 16) | /* H back porch */
+- (hsync_len << 0)); /* Hsync */
+- tc_write(HTIM02, (right_margin << 16) | /* H front porch */
+- (mode->hdisplay << 0)); /* width */
++ tc_write(HTIM01, (ALIGN(left_margin, 2) << 16) | /* H back porch */
++ (ALIGN(hsync_len, 2) << 0)); /* Hsync */
++ tc_write(HTIM02, (ALIGN(right_margin, 2) << 16) | /* H front porch */
++ (ALIGN(mode->hdisplay, 2) << 0)); /* width */
+ tc_write(VTIM01, (upper_margin << 16) | /* V back porch */
+ (vsync_len << 0)); /* Vsync */
+ tc_write(VTIM02, (lower_margin << 16) | /* V front porch */
+@@ -689,7 +706,7 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
+ /* DP Main Stream Attributes */
+ vid_sync_dly = hsync_len + left_margin + mode->hdisplay;
+ tc_write(DP0_VIDSYNCDELAY,
+- (0x003e << 16) | /* thresh_dly */
++ (max_tu_symbol << 16) | /* thresh_dly */
+ (vid_sync_dly << 0));
+
+ tc_write(DP0_TOTALVAL, (mode->vtotal << 16) | (mode->htotal));
+@@ -705,14 +722,8 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
+ tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
+ DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
+
+- /*
+- * Recommended maximum number of symbols transferred in a transfer unit:
+- * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size,
+- * (output active video bandwidth in bytes))
+- * Must be less than tu_size.
+- */
+- max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
+- tc_write(DP0_MISC, (max_tu_symbol << 23) | TU_SIZE_RECOMMENDED | BPC_8);
++ tc_write(DP0_MISC, (max_tu_symbol << 23) | (TU_SIZE_RECOMMENDED << 16) |
++ BPC_8);
+
+ return 0;
+ err:
+@@ -808,8 +819,6 @@ static int tc_main_link_setup(struct tc_data *tc)
+ unsigned int rate;
+ u32 dp_phy_ctrl;
+ int timeout;
+- bool aligned;
+- bool ready;
+ u32 value;
+ int ret;
+ u8 tmp[8];
+@@ -954,16 +963,15 @@ static int tc_main_link_setup(struct tc_data *tc)
+ ret = drm_dp_dpcd_read_link_status(aux, tmp + 2);
+ if (ret < 0)
+ goto err_dpcd_read;
+- ready = (tmp[2] == ((DP_CHANNEL_EQ_BITS << 4) | /* Lane1 */
+- DP_CHANNEL_EQ_BITS)); /* Lane0 */
+- aligned = tmp[4] & DP_INTERLANE_ALIGN_DONE;
+- } while ((--timeout) && !(ready && aligned));
++ } while ((--timeout) &&
++ !(drm_dp_channel_eq_ok(tmp + 2, tc->link.base.num_lanes)));
+
+ if (timeout == 0) {
+ /* Read DPCD 0x200-0x201 */
+ ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT, tmp, 2);
+ if (ret < 0)
+ goto err_dpcd_read;
++ dev_err(dev, "channel(s) EQ not ok\n");
+ dev_info(dev, "0x0200 SINK_COUNT: 0x%02x\n", tmp[0]);
+ dev_info(dev, "0x0201 DEVICE_SERVICE_IRQ_VECTOR: 0x%02x\n",
+ tmp[1]);
+@@ -974,10 +982,6 @@ static int tc_main_link_setup(struct tc_data *tc)
+ dev_info(dev, "0x0206 ADJUST_REQUEST_LANE0_1: 0x%02x\n",
+ tmp[6]);
+
+- if (!ready)
+- dev_err(dev, "Lane0/1 not ready\n");
+- if (!aligned)
+- dev_err(dev, "Lane0/1 not aligned\n");
+ return -EAGAIN;
+ }
+
+@@ -1105,7 +1109,10 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
+ static int tc_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+ {
+- /* Accept any mode */
++ /* DPI interface clock limitation: upto 154 MHz */
++ if (mode->clock > 154000)
++ return MODE_CLOCK_HIGH;
++
+ return MODE_OK;
+ }
+
+diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+index 4ceed7a9762f..4b83e9eeab06 100644
+--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
++++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+@@ -638,7 +638,8 @@ static int omap_dmm_probe(struct platform_device *dev)
+ match = of_match_node(dmm_of_match, dev->dev.of_node);
+ if (!match) {
+ dev_err(&dev->dev, "failed to find matching device node\n");
+- return -ENODEV;
++ ret = -ENODEV;
++ goto fail;
+ }
+
+ omap_dmm->plat_data = match->data;
+diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
+index 094bc6a475c1..d96c084d3a76 100644
+--- a/drivers/gpu/drm/vc4/vc4_irq.c
++++ b/drivers/gpu/drm/vc4/vc4_irq.c
+@@ -225,6 +225,9 @@ vc4_irq_uninstall(struct drm_device *dev)
+ /* Clear any pending interrupts we might have left. */
+ V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
+
++ /* Finish any interrupt handler still in flight. */
++ disable_irq(dev->irq);
++
+ cancel_work_sync(&vc4->overflow_mem_work);
+ }
+
+diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
+index 7cc346ad9b0b..ce7c21d250cf 100644
+--- a/drivers/gpu/drm/vc4/vc4_v3d.c
++++ b/drivers/gpu/drm/vc4/vc4_v3d.c
+@@ -173,6 +173,9 @@ static int vc4_v3d_runtime_resume(struct device *dev)
+ struct vc4_dev *vc4 = v3d->vc4;
+
+ vc4_v3d_init_hw(vc4->dev);
++
++ /* We disabled the IRQ as part of vc4_irq_uninstall in suspend. */
++ enable_irq(vc4->dev->irq);
+ vc4_irq_postinstall(vc4->dev);
+
+ return 0;
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index d72dfb2bbdb8..7a4d39ce51d9 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -2192,23 +2192,23 @@ static void wacom_remote_destroy_one(struct wacom *wacom, unsigned int index)
+ int i;
+ unsigned long flags;
+
+- spin_lock_irqsave(&remote->remote_lock, flags);
+- remote->remotes[index].registered = false;
+- spin_unlock_irqrestore(&remote->remote_lock, flags);
++ for (i = 0; i < WACOM_MAX_REMOTES; i++) {
++ if (remote->remotes[i].serial == serial) {
+
+- if (remote->remotes[index].battery.battery)
+- devres_release_group(&wacom->hdev->dev,
+- &remote->remotes[index].battery.bat_desc);
++ spin_lock_irqsave(&remote->remote_lock, flags);
++ remote->remotes[i].registered = false;
++ spin_unlock_irqrestore(&remote->remote_lock, flags);
+
+- if (remote->remotes[index].group.name)
+- devres_release_group(&wacom->hdev->dev,
+- &remote->remotes[index]);
++ if (remote->remotes[i].battery.battery)
++ devres_release_group(&wacom->hdev->dev,
++ &remote->remotes[i].battery.bat_desc);
++
++ if (remote->remotes[i].group.name)
++ devres_release_group(&wacom->hdev->dev,
++ &remote->remotes[i]);
+
+- for (i = 0; i < WACOM_MAX_REMOTES; i++) {
+- if (remote->remotes[i].serial == serial) {
+ remote->remotes[i].serial = 0;
+ remote->remotes[i].group.name = NULL;
+- remote->remotes[i].registered = false;
+ remote->remotes[i].battery.battery = NULL;
+ wacom->led.groups[i].select = WACOM_STATUS_UNKNOWN;
+ }
+diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
+index ba59eaef2e07..d013acf3f83a 100644
+--- a/drivers/hwmon/pmbus/pmbus_core.c
++++ b/drivers/hwmon/pmbus/pmbus_core.c
+@@ -20,6 +20,7 @@
+ */
+
+ #include <linux/kernel.h>
++#include <linux/math64.h>
+ #include <linux/module.h>
+ #include <linux/init.h>
+ #include <linux/err.h>
+@@ -476,8 +477,8 @@ static long pmbus_reg2data_linear(struct pmbus_data *data,
+ static long pmbus_reg2data_direct(struct pmbus_data *data,
+ struct pmbus_sensor *sensor)
+ {
+- long val = (s16) sensor->data;
+- long m, b, R;
++ s64 b, val = (s16)sensor->data;
++ s32 m, R;
+
+ m = data->info->m[sensor->class];
+ b = data->info->b[sensor->class];
+@@ -505,11 +506,12 @@ static long pmbus_reg2data_direct(struct pmbus_data *data,
+ R--;
+ }
+ while (R < 0) {
+- val = DIV_ROUND_CLOSEST(val, 10);
++ val = div_s64(val + 5LL, 10L); /* round closest */
+ R++;
+ }
+
+- return (val - b) / m;
++ val = div_s64(val - b, m);
++ return clamp_val(val, LONG_MIN, LONG_MAX);
+ }
+
+ /*
+@@ -629,7 +631,8 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
+ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
+ struct pmbus_sensor *sensor, long val)
+ {
+- long m, b, R;
++ s64 b, val64 = val;
++ s32 m, R;
+
+ m = data->info->m[sensor->class];
+ b = data->info->b[sensor->class];
+@@ -646,18 +649,18 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
+ R -= 3; /* Adjust R and b for data in milli-units */
+ b *= 1000;
+ }
+- val = val * m + b;
++ val64 = val64 * m + b;
+
+ while (R > 0) {
+- val *= 10;
++ val64 *= 10;
+ R--;
+ }
+ while (R < 0) {
+- val = DIV_ROUND_CLOSEST(val, 10);
++ val64 = div_s64(val64 + 5LL, 10L); /* round closest */
+ R++;
+ }
+
+- return val;
++ return (u16)clamp_val(val64, S16_MIN, S16_MAX);
+ }
+
+ static u16 pmbus_data2reg_vid(struct pmbus_data *data,
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index a2120ff0ef4c..5e29fbd3a5a0 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -2575,6 +2575,18 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
+ return ret;
+ }
+
++static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
++{
++ switch (umr_fence_cap) {
++ case MLX5_CAP_UMR_FENCE_NONE:
++ return MLX5_FENCE_MODE_NONE;
++ case MLX5_CAP_UMR_FENCE_SMALL:
++ return MLX5_FENCE_MODE_INITIATOR_SMALL;
++ default:
++ return MLX5_FENCE_MODE_STRONG_ORDERING;
++ }
++}
++
+ static int create_dev_resources(struct mlx5_ib_resources *devr)
+ {
+ struct ib_srq_init_attr attr;
+@@ -3101,6 +3113,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
+
+ mlx5_ib_internal_fill_odp_caps(dev);
+
++ dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
++
+ if (MLX5_CAP_GEN(mdev, imaicl)) {
+ dev->ib_dev.alloc_mw = mlx5_ib_alloc_mw;
+ dev->ib_dev.dealloc_mw = mlx5_ib_dealloc_mw;
+diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+index 86e1e08125ff..d5cc954e8ac2 100644
+--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
++++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+@@ -345,7 +345,7 @@ struct mlx5_ib_qp {
+ struct mlx5_ib_wq rq;
+
+ u8 sq_signal_bits;
+- u8 fm_cache;
++ u8 next_fence;
+ struct mlx5_ib_wq sq;
+
+ /* serialize qp state modifications
+@@ -643,6 +643,7 @@ struct mlx5_ib_dev {
+ struct list_head qp_list;
+ /* Array with num_ports elements */
+ struct mlx5_ib_port *port;
++ u8 umr_fence;
+ };
+
+ static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index 2665414b4875..fdd156101a72 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -3755,24 +3755,6 @@ static void mlx5_bf_copy(u64 __iomem *dst, u64 *src,
+ }
+ }
+
+-static u8 get_fence(u8 fence, struct ib_send_wr *wr)
+-{
+- if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
+- wr->send_flags & IB_SEND_FENCE))
+- return MLX5_FENCE_MODE_STRONG_ORDERING;
+-
+- if (unlikely(fence)) {
+- if (wr->send_flags & IB_SEND_FENCE)
+- return MLX5_FENCE_MODE_SMALL_AND_FENCE;
+- else
+- return fence;
+- } else if (unlikely(wr->send_flags & IB_SEND_FENCE)) {
+- return MLX5_FENCE_MODE_FENCE;
+- }
+-
+- return 0;
+-}
+-
+ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
+ struct mlx5_wqe_ctrl_seg **ctrl,
+ struct ib_send_wr *wr, unsigned *idx,
+@@ -3801,8 +3783,7 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
+ static void finish_wqe(struct mlx5_ib_qp *qp,
+ struct mlx5_wqe_ctrl_seg *ctrl,
+ u8 size, unsigned idx, u64 wr_id,
+- int nreq, u8 fence, u8 next_fence,
+- u32 mlx5_opcode)
++ int nreq, u8 fence, u32 mlx5_opcode)
+ {
+ u8 opmod = 0;
+
+@@ -3810,7 +3791,6 @@ static void finish_wqe(struct mlx5_ib_qp *qp,
+ mlx5_opcode | ((u32)opmod << 24));
+ ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
+ ctrl->fm_ce_se |= fence;
+- qp->fm_cache = next_fence;
+ if (unlikely(qp->wq_sig))
+ ctrl->signature = wq_sig(ctrl);
+
+@@ -3870,7 +3850,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ goto out;
+ }
+
+- fence = qp->fm_cache;
+ num_sge = wr->num_sge;
+ if (unlikely(num_sge > qp->sq.max_gs)) {
+ mlx5_ib_warn(dev, "\n");
+@@ -3887,6 +3866,19 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ goto out;
+ }
+
++ if (wr->opcode == IB_WR_LOCAL_INV ||
++ wr->opcode == IB_WR_REG_MR) {
++ fence = dev->umr_fence;
++ next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
++ } else if (wr->send_flags & IB_SEND_FENCE) {
++ if (qp->next_fence)
++ fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
++ else
++ fence = MLX5_FENCE_MODE_FENCE;
++ } else {
++ fence = qp->next_fence;
++ }
++
+ switch (ibqp->qp_type) {
+ case IB_QPT_XRC_INI:
+ xrc = seg;
+@@ -3913,7 +3905,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ goto out;
+
+ case IB_WR_LOCAL_INV:
+- next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
+ qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
+ ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
+ set_linv_wr(qp, &seg, &size);
+@@ -3921,7 +3912,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ break;
+
+ case IB_WR_REG_MR:
+- next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
+ qp->sq.wr_data[idx] = IB_WR_REG_MR;
+ ctrl->imm = cpu_to_be32(reg_wr(wr)->key);
+ err = set_reg_wr(qp, reg_wr(wr), &seg, &size);
+@@ -3944,9 +3934,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ goto out;
+ }
+
+- finish_wqe(qp, ctrl, size, idx, wr->wr_id,
+- nreq, get_fence(fence, wr),
+- next_fence, MLX5_OPCODE_UMR);
++ finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
++ fence, MLX5_OPCODE_UMR);
+ /*
+ * SET_PSV WQEs are not signaled and solicited
+ * on error
+@@ -3971,9 +3960,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ goto out;
+ }
+
+- finish_wqe(qp, ctrl, size, idx, wr->wr_id,
+- nreq, get_fence(fence, wr),
+- next_fence, MLX5_OPCODE_SET_PSV);
++ finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
++ fence, MLX5_OPCODE_SET_PSV);
+ err = begin_wqe(qp, &seg, &ctrl, wr,
+ &idx, &size, nreq);
+ if (err) {
+@@ -3983,7 +3971,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ goto out;
+ }
+
+- next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
+ err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire,
+ mr->sig->psv_wire.psv_idx, &seg,
+ &size);
+@@ -3993,9 +3980,9 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ goto out;
+ }
+
+- finish_wqe(qp, ctrl, size, idx, wr->wr_id,
+- nreq, get_fence(fence, wr),
+- next_fence, MLX5_OPCODE_SET_PSV);
++ finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
++ fence, MLX5_OPCODE_SET_PSV);
++ qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
+ num_sge = 0;
+ goto skip_psv;
+
+@@ -4100,8 +4087,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ }
+ }
+
+- finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
+- get_fence(fence, wr), next_fence,
++ qp->next_fence = next_fence;
++ finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, fence,
+ mlx5_ib_opcode[wr->opcode]);
+ skip_psv:
+ if (0)
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 2efdce07247c..cac297f8170e 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -803,7 +803,10 @@ int bch_btree_cache_alloc(struct cache_set *c)
+ c->shrink.scan_objects = bch_mca_scan;
+ c->shrink.seeks = 4;
+ c->shrink.batch = c->btree_pages * 2;
+- register_shrinker(&c->shrink);
++
++ if (register_shrinker(&c->shrink))
++ pr_warn("bcache: %s: could not register shrinker",
++ __func__);
+
+ return 0;
+ }
+diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c
+index dc76fd41e00f..0324633ede42 100644
+--- a/drivers/media/usb/usbtv/usbtv-core.c
++++ b/drivers/media/usb/usbtv/usbtv-core.c
+@@ -141,6 +141,7 @@ static void usbtv_disconnect(struct usb_interface *intf)
+
+ static struct usb_device_id usbtv_id_table[] = {
+ { USB_DEVICE(0x1b71, 0x3002) },
++ { USB_DEVICE(0x1f71, 0x3301) },
+ {}
+ };
+ MODULE_DEVICE_TABLE(usb, usbtv_id_table);
+diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
+index de31514df282..d38527e0a2f2 100644
+--- a/drivers/mtd/nand/denali_pci.c
++++ b/drivers/mtd/nand/denali_pci.c
+@@ -119,3 +119,7 @@ static struct pci_driver denali_pci_driver = {
+ };
+
+ module_pci_driver(denali_pci_driver);
++
++MODULE_DESCRIPTION("PCI driver for Denali NAND controller");
++MODULE_AUTHOR("Intel Corporation and its suppliers");
++MODULE_LICENSE("GPL v2");
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+index a7e04ff4eaed..cde4b96f3153 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -1843,8 +1843,8 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
+ /* Read A2 portion of the EEPROM */
+ if (length) {
+ start -= ETH_MODULE_SFF_8436_LEN;
+- bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1, start,
+- length, data);
++ rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1,
++ start, length, data);
+ }
+ return rc;
+ }
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index ca54f7684668..3a61491421b1 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -3273,7 +3273,7 @@ static int __igb_close(struct net_device *netdev, bool suspending)
+
+ int igb_close(struct net_device *netdev)
+ {
+- if (netif_device_present(netdev))
++ if (netif_device_present(netdev) || netdev->dismantle)
+ return __igb_close(netdev, false);
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
+index 6d68c8a8f4f2..da4ec575ccf9 100644
+--- a/drivers/net/ethernet/xilinx/Kconfig
++++ b/drivers/net/ethernet/xilinx/Kconfig
+@@ -34,6 +34,7 @@ config XILINX_AXI_EMAC
+ config XILINX_LL_TEMAC
+ tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
+ depends on (PPC || MICROBLAZE)
++ depends on !64BIT || BROKEN
+ select PHYLIB
+ ---help---
+ This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+index d04babd99b53..ff5ce1ed03c4 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+@@ -1040,6 +1040,8 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
+ return le32_to_cpu(txq_timer->p2p_go);
+ case NL80211_IFTYPE_P2P_DEVICE:
+ return le32_to_cpu(txq_timer->p2p_device);
++ case NL80211_IFTYPE_MONITOR:
++ return default_timeout;
+ default:
+ WARN_ON(1);
+ return mvm->cfg->base_params->wd_timeout;
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index 8d498a997e25..1a9dadf7b3cc 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -86,6 +86,8 @@ struct netfront_cb {
+ /* IRQ name is queue name with "-tx" or "-rx" appended */
+ #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
+
++static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
++
+ struct netfront_stats {
+ u64 packets;
+ u64 bytes;
+@@ -2051,10 +2053,12 @@ static void netback_changed(struct xenbus_device *dev,
+ break;
+
+ case XenbusStateClosed:
++ wake_up_all(&module_unload_q);
+ if (dev->state == XenbusStateClosed)
+ break;
+ /* Missed the backend's CLOSING state -- fallthrough */
+ case XenbusStateClosing:
++ wake_up_all(&module_unload_q);
+ xenbus_frontend_closed(dev);
+ break;
+ }
+@@ -2160,6 +2164,20 @@ static int xennet_remove(struct xenbus_device *dev)
+
+ dev_dbg(&dev->dev, "%s\n", dev->nodename);
+
++ if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
++ xenbus_switch_state(dev, XenbusStateClosing);
++ wait_event(module_unload_q,
++ xenbus_read_driver_state(dev->otherend) ==
++ XenbusStateClosing);
++
++ xenbus_switch_state(dev, XenbusStateClosed);
++ wait_event(module_unload_q,
++ xenbus_read_driver_state(dev->otherend) ==
++ XenbusStateClosed ||
++ xenbus_read_driver_state(dev->otherend) ==
++ XenbusStateUnknown);
++ }
++
+ xennet_disconnect_backend(info);
+
+ unregister_netdev(info->netdev);
+diff --git a/drivers/power/reset/zx-reboot.c b/drivers/power/reset/zx-reboot.c
+index b0b1eb3a78c2..76153ac0706c 100644
+--- a/drivers/power/reset/zx-reboot.c
++++ b/drivers/power/reset/zx-reboot.c
+@@ -81,3 +81,7 @@ static struct platform_driver zx_reboot_driver = {
+ },
+ };
+ module_platform_driver(zx_reboot_driver);
++
++MODULE_DESCRIPTION("ZTE SoCs reset driver");
++MODULE_AUTHOR("Jun Nie <jun.nie@linaro.org>");
++MODULE_LICENSE("GPL v2");
+diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
+index 0aeecec1f5ea..e2962f15c189 100644
+--- a/drivers/scsi/aacraid/commsup.c
++++ b/drivers/scsi/aacraid/commsup.c
+@@ -1416,13 +1416,13 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
+ * will ensure that i/o is queisced and the card is flushed in that
+ * case.
+ */
++ aac_free_irq(aac);
+ aac_fib_map_free(aac);
+ pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
+ aac->comm_addr = NULL;
+ aac->comm_phys = 0;
+ kfree(aac->queues);
+ aac->queues = NULL;
+- aac_free_irq(aac);
+ kfree(aac->fsa_dev);
+ aac->fsa_dev = NULL;
+ quirks = aac_get_driver_ident(index)->quirks;
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index 530034bc2d13..2e9341233f66 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -5327,12 +5327,15 @@ static int ufshcd_config_vreg(struct device *dev,
+ struct ufs_vreg *vreg, bool on)
+ {
+ int ret = 0;
+- struct regulator *reg = vreg->reg;
+- const char *name = vreg->name;
++ struct regulator *reg;
++ const char *name;
+ int min_uV, uA_load;
+
+ BUG_ON(!vreg);
+
++ reg = vreg->reg;
++ name = vreg->name;
++
+ if (regulator_count_voltages(reg) > 0) {
+ min_uV = on ? vreg->min_uV : 0;
+ ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
+diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
+index deb782f6556c..a6e34f05d44d 100644
+--- a/drivers/spi/spi-imx.c
++++ b/drivers/spi/spi-imx.c
+@@ -1307,12 +1307,23 @@ static int spi_imx_remove(struct platform_device *pdev)
+ {
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
++ int ret;
+
+ spi_bitbang_stop(&spi_imx->bitbang);
+
++ ret = clk_enable(spi_imx->clk_per);
++ if (ret)
++ return ret;
++
++ ret = clk_enable(spi_imx->clk_ipg);
++ if (ret) {
++ clk_disable(spi_imx->clk_per);
++ return ret;
++ }
++
+ writel(0, spi_imx->base + MXC_CSPICTRL);
+- clk_unprepare(spi_imx->clk_ipg);
+- clk_unprepare(spi_imx->clk_per);
++ clk_disable_unprepare(spi_imx->clk_ipg);
++ clk_disable_unprepare(spi_imx->clk_per);
+ spi_imx_sdma_exit(spi_imx);
+ spi_master_put(master);
+
+diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+index 9e8802181452..e8d9db4d8179 100644
+--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
++++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+@@ -824,14 +824,15 @@ struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cm
+ return conn;
+
+ failed_2:
+- kiblnd_destroy_conn(conn, true);
++ kiblnd_destroy_conn(conn);
++ LIBCFS_FREE(conn, sizeof(*conn));
+ failed_1:
+ LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
+ failed_0:
+ return NULL;
+ }
+
+-void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn)
++void kiblnd_destroy_conn(struct kib_conn *conn)
+ {
+ struct rdma_cm_id *cmid = conn->ibc_cmid;
+ struct kib_peer *peer = conn->ibc_peer;
+@@ -894,8 +895,6 @@ void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn)
+ rdma_destroy_id(cmid);
+ atomic_dec(&net->ibn_nconns);
+ }
+-
+- LIBCFS_FREE(conn, sizeof(*conn));
+ }
+
+ int kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why)
+diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
+index 14576977200f..30cb2f5b3c15 100644
+--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
++++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
+@@ -1018,7 +1018,7 @@ int kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why);
+ struct kib_conn *kiblnd_create_conn(struct kib_peer *peer,
+ struct rdma_cm_id *cmid,
+ int state, int version);
+-void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn);
++void kiblnd_destroy_conn(struct kib_conn *conn);
+ void kiblnd_close_conn(struct kib_conn *conn, int error);
+ void kiblnd_close_conn_locked(struct kib_conn *conn, int error);
+
+diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+index 995f2dac7f26..ea9a0c21d29d 100644
+--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
++++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+@@ -3323,11 +3323,13 @@ kiblnd_connd(void *arg)
+ spin_unlock_irqrestore(lock, flags);
+ dropped_lock = 1;
+
+- kiblnd_destroy_conn(conn, !peer);
++ kiblnd_destroy_conn(conn);
+
+ spin_lock_irqsave(lock, flags);
+- if (!peer)
++ if (!peer) {
++ kfree(conn);
+ continue;
++ }
+
+ conn->ibc_peer = peer;
+ if (peer->ibp_reconnected < KIB_RECONN_HIGH_RACE)
+diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+index 4de9dbc93380..c7bf8ab26192 100644
+--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
++++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+@@ -1397,19 +1397,13 @@ static int rtw_wx_get_essid(struct net_device *dev,
+ if ((check_fwstate(pmlmepriv, _FW_LINKED)) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))) {
+ len = pcur_bss->Ssid.SsidLength;
+-
+- wrqu->essid.length = len;
+-
+ memcpy(extra, pcur_bss->Ssid.Ssid, len);
+-
+- wrqu->essid.flags = 1;
+ } else {
+- ret = -1;
+- goto exit;
++ len = 0;
++ *extra = 0;
+ }
+-
+-exit:
+-
++ wrqu->essid.length = len;
++ wrqu->essid.flags = 1;
+
+ return ret;
+ }
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index a70356dad1b7..521a6e450755 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -2239,12 +2239,14 @@ static void serial_imx_enable_wakeup(struct imx_port *sport, bool on)
+ val &= ~UCR3_AWAKEN;
+ writel(val, sport->port.membase + UCR3);
+
+- val = readl(sport->port.membase + UCR1);
+- if (on)
+- val |= UCR1_RTSDEN;
+- else
+- val &= ~UCR1_RTSDEN;
+- writel(val, sport->port.membase + UCR1);
++ if (sport->have_rtscts) {
++ val = readl(sport->port.membase + UCR1);
++ if (on)
++ val |= UCR1_RTSDEN;
++ else
++ val &= ~UCR1_RTSDEN;
++ writel(val, sport->port.membase + UCR1);
++ }
+ }
+
+ static int imx_serial_port_suspend_noirq(struct device *dev)
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 734a635e7363..8d9f9a803b42 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -1543,6 +1543,9 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
+ "%s: %s driver does not set tty->port. This will crash the kernel later. Fix the driver!\n",
+ __func__, tty->driver->name);
+
++ retval = tty_ldisc_lock(tty, 5 * HZ);
++ if (retval)
++ goto err_release_lock;
+ tty->port->itty = tty;
+
+ /*
+@@ -1553,6 +1556,7 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
+ retval = tty_ldisc_setup(tty, tty->link);
+ if (retval)
+ goto err_release_tty;
++ tty_ldisc_unlock(tty);
+ /* Return the tty locked so that it cannot vanish under the caller */
+ return tty;
+
+@@ -1565,9 +1569,11 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
+
+ /* call the tty release_tty routine to clean out this slot */
+ err_release_tty:
+- tty_unlock(tty);
++ tty_ldisc_unlock(tty);
+ tty_info_ratelimited(tty, "ldisc open failed (%d), clearing slot %d\n",
+ retval, idx);
++err_release_lock:
++ tty_unlock(tty);
+ release_tty(tty, idx);
+ return ERR_PTR(retval);
+ }
+diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
+index b0500a0a87b8..3a9e2a2fd4c6 100644
+--- a/drivers/tty/tty_ldisc.c
++++ b/drivers/tty/tty_ldisc.c
+@@ -336,7 +336,7 @@ static inline void __tty_ldisc_unlock(struct tty_struct *tty)
+ ldsem_up_write(&tty->ldisc_sem);
+ }
+
+-static int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
++int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
+ {
+ int ret;
+
+@@ -347,7 +347,7 @@ static int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
+ return 0;
+ }
+
+-static void tty_ldisc_unlock(struct tty_struct *tty)
++void tty_ldisc_unlock(struct tty_struct *tty)
+ {
+ clear_bit(TTY_LDISC_HALTED, &tty->flags);
+ __tty_ldisc_unlock(tty);
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index ea20b2cc189f..34d23cc99fbd 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -375,7 +375,7 @@ static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags)
+
+ res = usb_submit_urb(acm->read_urbs[index], mem_flags);
+ if (res) {
+- if (res != -EPERM) {
++ if (res != -EPERM && res != -ENODEV) {
+ dev_err(&acm->data->dev,
+ "urb %d failed submission with %d\n",
+ index, res);
+@@ -1706,6 +1706,9 @@ static const struct usb_device_id acm_ids[] = {
+ { USB_DEVICE(0x0ace, 0x1611), /* ZyDAS 56K USB MODEM - new version */
+ .driver_info = SINGLE_RX_URB, /* firmware bug */
+ },
++ { USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */
++ .driver_info = SINGLE_RX_URB,
++ },
+ { USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */
+ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+ },
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index 325bf21ba13b..406758ed0b23 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -150,7 +150,6 @@ int config_ep_by_speed(struct usb_gadget *g,
+ struct usb_function *f,
+ struct usb_ep *_ep)
+ {
+- struct usb_composite_dev *cdev = get_gadget_data(g);
+ struct usb_endpoint_descriptor *chosen_desc = NULL;
+ struct usb_descriptor_header **speed_desc = NULL;
+
+@@ -229,8 +228,12 @@ int config_ep_by_speed(struct usb_gadget *g,
+ _ep->maxburst = comp_desc->bMaxBurst + 1;
+ break;
+ default:
+- if (comp_desc->bMaxBurst != 0)
++ if (comp_desc->bMaxBurst != 0) {
++ struct usb_composite_dev *cdev;
++
++ cdev = get_gadget_data(g);
+ ERROR(cdev, "ep0 bMaxBurst must be 0\n");
++ }
+ _ep->maxburst = 1;
+ break;
+ }
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 7b107e43b1c4..d90bf57ba30e 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -3725,7 +3725,8 @@ static void ffs_closed(struct ffs_data *ffs)
+ ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
+ ffs_dev_unlock();
+
+- unregister_gadget_item(ci);
++ if (test_bit(FFS_FL_BOUND, &ffs->flags))
++ unregister_gadget_item(ci);
+ return;
+ done:
+ ffs_dev_unlock();
+diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
+index d685d82dcf48..e97539fc127e 100644
+--- a/drivers/usb/gadget/udc/core.c
++++ b/drivers/usb/gadget/udc/core.c
+@@ -913,7 +913,7 @@ int usb_gadget_ep_match_desc(struct usb_gadget *gadget,
+ return 0;
+
+ /* "high bandwidth" works only at high speed */
+- if (!gadget_is_dualspeed(gadget) && usb_endpoint_maxp(desc) & (3<<11))
++ if (!gadget_is_dualspeed(gadget) && usb_endpoint_maxp_mult(desc) > 1)
+ return 0;
+
+ switch (type) {
+diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
+index 56ecb8b5115d..584ae8cbaf1c 100644
+--- a/drivers/usb/serial/Kconfig
++++ b/drivers/usb/serial/Kconfig
+@@ -63,6 +63,7 @@ config USB_SERIAL_SIMPLE
+ - Google USB serial devices
+ - HP4x calculators
+ - a number of Motorola phones
++ - Motorola Tetra devices
+ - Novatel Wireless GPS receivers
+ - Siemens USB/MPI adapter.
+ - ViVOtech ViVOpay USB device.
+diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
+index 464db17b5328..de61271f2ba3 100644
+--- a/drivers/usb/serial/io_edgeport.c
++++ b/drivers/usb/serial/io_edgeport.c
+@@ -2215,7 +2215,6 @@ static int write_cmd_usb(struct edgeport_port *edge_port,
+ /* something went wrong */
+ dev_err(dev, "%s - usb_submit_urb(write command) failed, status = %d\n",
+ __func__, status);
+- usb_kill_urb(urb);
+ usb_free_urb(urb);
+ atomic_dec(&CmdUrbs);
+ return status;
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index a818c43a02ec..1799aa058a5b 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -383,6 +383,9 @@ static void option_instat_callback(struct urb *urb);
+ #define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
+ #define FOUR_G_SYSTEMS_PRODUCT_W100 0x9b01
+
++/* Fujisoft products */
++#define FUJISOFT_PRODUCT_FS040U 0x9b02
++
+ /* iBall 3.5G connect wireless modem */
+ #define IBALL_3_5G_CONNECT 0x9605
+
+@@ -1897,6 +1900,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
+ .driver_info = (kernel_ulong_t)&four_g_w100_blacklist
+ },
++ {USB_DEVICE(LONGCHEER_VENDOR_ID, FUJISOFT_PRODUCT_FS040U),
++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist},
+ { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
+ { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index a51b28379850..3da25ad267a2 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -39,6 +39,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ2) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_DCU11) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ3) },
++ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_CHILITAG) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_PHAROS) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ALDIGA) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) },
+diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
+index 3b5a15d1dc0d..123289085ee2 100644
+--- a/drivers/usb/serial/pl2303.h
++++ b/drivers/usb/serial/pl2303.h
+@@ -17,6 +17,7 @@
+ #define PL2303_PRODUCT_ID_DCU11 0x1234
+ #define PL2303_PRODUCT_ID_PHAROS 0xaaa0
+ #define PL2303_PRODUCT_ID_RSAQ3 0xaaa2
++#define PL2303_PRODUCT_ID_CHILITAG 0xaaa8
+ #define PL2303_PRODUCT_ID_ALDIGA 0x0611
+ #define PL2303_PRODUCT_ID_MMX 0x0612
+ #define PL2303_PRODUCT_ID_GPRS 0x0609
+diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
+index e98b6e57b703..6aa7ff2c1cf7 100644
+--- a/drivers/usb/serial/usb-serial-simple.c
++++ b/drivers/usb/serial/usb-serial-simple.c
+@@ -80,6 +80,11 @@ DEVICE(vivopay, VIVOPAY_IDS);
+ { USB_DEVICE(0x22b8, 0x2c64) } /* Motorola V950 phone */
+ DEVICE(moto_modem, MOTO_IDS);
+
++/* Motorola Tetra driver */
++#define MOTOROLA_TETRA_IDS() \
++ { USB_DEVICE(0x0cad, 0x9011) } /* Motorola Solutions TETRA PEI */
++DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
++
+ /* Novatel Wireless GPS driver */
+ #define NOVATEL_IDS() \
+ { USB_DEVICE(0x09d7, 0x0100) } /* NovAtel FlexPack GPS */
+@@ -110,6 +115,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
+ &google_device,
+ &vivopay_device,
+ &moto_modem_device,
++ &motorola_tetra_device,
+ &novatel_gps_device,
+ &hp4x_device,
+ &suunto_device,
+@@ -125,6 +131,7 @@ static const struct usb_device_id id_table[] = {
+ GOOGLE_IDS(),
+ VIVOPAY_IDS(),
+ MOTO_IDS(),
++ MOTOROLA_TETRA_IDS(),
+ NOVATEL_IDS(),
+ HP4X_IDS(),
+ SUUNTO_IDS(),
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index 9876af4ab64e..6891e9092775 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -1076,20 +1076,19 @@ static int uas_post_reset(struct usb_interface *intf)
+ return 0;
+
+ err = uas_configure_endpoints(devinfo);
+- if (err) {
++ if (err && err != ENODEV)
+ shost_printk(KERN_ERR, shost,
+ "%s: alloc streams error %d after reset",
+ __func__, err);
+- return 1;
+- }
+
++ /* we must unblock the host in every case lest we deadlock */
+ spin_lock_irqsave(shost->host_lock, flags);
+ scsi_report_bus_reset(shost, 0);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ scsi_unblock_requests(shost);
+
+- return 0;
++ return err ? 1 : 0;
+ }
+
+ static int uas_suspend(struct usb_interface *intf, pm_message_t message)
+diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
+index 7f161b095176..dbe615ba07c9 100644
+--- a/drivers/usb/usbip/vhci_hcd.c
++++ b/drivers/usb/usbip/vhci_hcd.c
+@@ -300,7 +300,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ case USB_PORT_FEAT_POWER:
+ usbip_dbg_vhci_rh(
+ " ClearPortFeature: USB_PORT_FEAT_POWER\n");
+- dum->port_status[rhport] = 0;
++ dum->port_status[rhport] &= ~USB_PORT_STAT_POWER;
+ dum->resuming = 0;
+ break;
+ case USB_PORT_FEAT_C_RESET:
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index e4b48f377d3a..c56253a1e5b4 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -1253,7 +1253,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
+ /* Lock all pages first so we can lock the extent safely. */
+ ret = io_ctl_prepare_pages(io_ctl, inode, 0);
+ if (ret)
+- goto out;
++ goto out_unlock;
+
+ lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
+ &cached_state);
+@@ -1346,6 +1346,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
+ out_nospc:
+ cleanup_write_cache_enospc(inode, io_ctl, &cached_state, &bitmap_list);
+
++out_unlock:
+ if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
+ up_write(&block_group->data_rwsem);
+
+diff --git a/fs/nfs_common/grace.c b/fs/nfs_common/grace.c
+index fd8c9a5bcac4..77d136ac8909 100644
+--- a/fs/nfs_common/grace.c
++++ b/fs/nfs_common/grace.c
+@@ -30,7 +30,11 @@ locks_start_grace(struct net *net, struct lock_manager *lm)
+ struct list_head *grace_list = net_generic(net, grace_net_id);
+
+ spin_lock(&grace_lock);
+- list_add(&lm->list, grace_list);
++ if (list_empty(&lm->list))
++ list_add(&lm->list, grace_list);
++ else
++ WARN(1, "double list_add attempt detected in net %x %s\n",
++ net->ns.inum, (net == &init_net) ? "(init_net)" : "");
+ spin_unlock(&grace_lock);
+ }
+ EXPORT_SYMBOL_GPL(locks_start_grace);
+@@ -104,7 +108,9 @@ grace_exit_net(struct net *net)
+ {
+ struct list_head *grace_list = net_generic(net, grace_net_id);
+
+- BUG_ON(!list_empty(grace_list));
++ WARN_ONCE(!list_empty(grace_list),
++ "net %x %s: grace_list is not empty\n",
++ net->ns.inum, __func__);
+ }
+
+ static struct pernet_operations grace_net_ops = {
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 9ebb2d7c8182..f463c4e0b2ea 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -63,12 +63,16 @@ static const stateid_t zero_stateid = {
+ static const stateid_t currentstateid = {
+ .si_generation = 1,
+ };
++static const stateid_t close_stateid = {
++ .si_generation = 0xffffffffU,
++};
+
+ static u64 current_sessionid = 1;
+
+ #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
+ #define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
+ #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
++#define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
+
+ /* forward declarations */
+ static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
+@@ -4866,7 +4870,8 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
+ struct nfs4_stid *s;
+ __be32 status = nfserr_bad_stateid;
+
+- if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
++ if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
++ CLOSE_STATEID(stateid))
+ return status;
+ /* Client debugging aid. */
+ if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
+@@ -4924,7 +4929,8 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
+ else if (typemask & NFS4_DELEG_STID)
+ typemask |= NFS4_REVOKED_DELEG_STID;
+
+- if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
++ if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
++ CLOSE_STATEID(stateid))
+ return nfserr_bad_stateid;
+ status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
+ if (status == nfserr_stale_clientid) {
+@@ -5175,15 +5181,9 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
+ status = nfsd4_check_seqid(cstate, sop, seqid);
+ if (status)
+ return status;
+- if (stp->st_stid.sc_type == NFS4_CLOSED_STID
+- || stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID)
+- /*
+- * "Closed" stateid's exist *only* to return
+- * nfserr_replay_me from the previous step, and
+- * revoked delegations are kept only for free_stateid.
+- */
+- return nfserr_bad_stateid;
+- mutex_lock(&stp->st_mutex);
++ status = nfsd4_lock_ol_stateid(stp);
++ if (status != nfs_ok)
++ return status;
+ status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
+ if (status == nfs_ok)
+ status = nfs4_check_fh(current_fh, &stp->st_stid);
+@@ -5407,6 +5407,11 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ nfsd4_close_open_stateid(stp);
+ mutex_unlock(&stp->st_mutex);
+
++	/* See RFC5661 section 18.2.4 */
++ if (stp->st_stid.sc_client->cl_minorversion)
++ memcpy(&close->cl_stateid, &close_stateid,
++ sizeof(close->cl_stateid));
++
+ /* put reference from nfs4_preprocess_seqid_op */
+ nfs4_put_stid(&stp->st_stid);
+ out:
+@@ -7007,6 +7012,10 @@ static int nfs4_state_create_net(struct net *net)
+ INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
+ nn->conf_name_tree = RB_ROOT;
+ nn->unconf_name_tree = RB_ROOT;
++ nn->boot_time = get_seconds();
++ nn->grace_ended = false;
++ nn->nfsd4_manager.block_opens = true;
++ INIT_LIST_HEAD(&nn->nfsd4_manager.list);
+ INIT_LIST_HEAD(&nn->client_lru);
+ INIT_LIST_HEAD(&nn->close_lru);
+ INIT_LIST_HEAD(&nn->del_recall_lru);
+@@ -7064,9 +7073,6 @@ nfs4_state_start_net(struct net *net)
+ ret = nfs4_state_create_net(net);
+ if (ret)
+ return ret;
+- nn->boot_time = get_seconds();
+- nn->grace_ended = false;
+- nn->nfsd4_manager.block_opens = true;
+ locks_start_grace(net, &nn->nfsd4_manager);
+ nfsd4_client_tracking_init(net);
+ printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index 1bfac28b7e7d..f9246ac4eef8 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -2985,7 +2985,8 @@ static int __init dquot_init(void)
+ pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
+ " %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));
+
+- register_shrinker(&dqcache_shrinker);
++ if (register_shrinker(&dqcache_shrinker))
++ panic("Cannot register dquot shrinker");
+
+ return 0;
+ }
+diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
+index 0a6ad4e71e88..e101d70d2327 100644
+--- a/fs/reiserfs/super.c
++++ b/fs/reiserfs/super.c
+@@ -2521,7 +2521,6 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
+ return err;
+ if (inode->i_size < off + len - towrite)
+ i_size_write(inode, off + len - towrite);
+- inode->i_version++;
+ inode->i_mtime = inode->i_ctime = current_time(inode);
+ mark_inode_dirty(inode);
+ return len - towrite;
+diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
+index d31cd1ebd8e9..f3acecf3869d 100644
+--- a/fs/xfs/xfs_aops.c
++++ b/fs/xfs/xfs_aops.c
+@@ -391,7 +391,7 @@ xfs_map_blocks(
+ (ip->i_df.if_flags & XFS_IFEXTENTS));
+ ASSERT(offset <= mp->m_super->s_maxbytes);
+
+- if (offset + count > mp->m_super->s_maxbytes)
++ if ((xfs_ufsize_t)offset + count > mp->m_super->s_maxbytes)
+ count = mp->m_super->s_maxbytes - offset;
+ end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
+ offset_fsb = XFS_B_TO_FSBT(mp, offset);
+@@ -1295,7 +1295,7 @@ xfs_map_trim_size(
+ if (mapping_size > size)
+ mapping_size = size;
+ if (offset < i_size_read(inode) &&
+- offset + mapping_size >= i_size_read(inode)) {
++ (xfs_ufsize_t)offset + mapping_size >= i_size_read(inode)) {
+ /* limit mapping to block that spans EOF */
+ mapping_size = roundup_64(i_size_read(inode) - offset,
+ i_blocksize(inode));
+@@ -1347,7 +1347,7 @@ __xfs_get_blocks(
+ lockmode = xfs_ilock_data_map_shared(ip);
+
+ ASSERT(offset <= mp->m_super->s_maxbytes);
+- if (offset + size > mp->m_super->s_maxbytes)
++ if ((xfs_ufsize_t)offset + size > mp->m_super->s_maxbytes)
+ size = mp->m_super->s_maxbytes - offset;
+ end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
+ offset_fsb = XFS_B_TO_FSBT(mp, offset);
+diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
+index eca7baecc9f0..3f45d9867e10 100644
+--- a/fs/xfs/xfs_buf.c
++++ b/fs/xfs/xfs_buf.c
+@@ -1785,22 +1785,27 @@ xfs_alloc_buftarg(
+ btp->bt_bdi = blk_get_backing_dev_info(bdev);
+
+ if (xfs_setsize_buftarg_early(btp, bdev))
+- goto error;
++ goto error_free;
+
+ if (list_lru_init(&btp->bt_lru))
+- goto error;
++ goto error_free;
+
+ if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
+- goto error;
++ goto error_lru;
+
+ btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
+ btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
+ btp->bt_shrinker.seeks = DEFAULT_SEEKS;
+ btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
+- register_shrinker(&btp->bt_shrinker);
++ if (register_shrinker(&btp->bt_shrinker))
++ goto error_pcpu;
+ return btp;
+
+-error:
++error_pcpu:
++ percpu_counter_destroy(&btp->bt_io_count);
++error_lru:
++ list_lru_destroy(&btp->bt_lru);
++error_free:
+ kmem_free(btp);
+ return NULL;
+ }
+diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
+index 9d06cc30e875..7a7b3ccf2273 100644
+--- a/fs/xfs/xfs_dquot.c
++++ b/fs/xfs/xfs_dquot.c
+@@ -1004,14 +1004,22 @@ xfs_qm_dqflush_done(
+ * holding the lock before removing the dquot from the AIL.
+ */
+ if ((lip->li_flags & XFS_LI_IN_AIL) &&
+- lip->li_lsn == qip->qli_flush_lsn) {
++ ((lip->li_lsn == qip->qli_flush_lsn) ||
++ (lip->li_flags & XFS_LI_FAILED))) {
+
+ /* xfs_trans_ail_delete() drops the AIL lock. */
+ spin_lock(&ailp->xa_lock);
+- if (lip->li_lsn == qip->qli_flush_lsn)
++ if (lip->li_lsn == qip->qli_flush_lsn) {
+ xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
+- else
++ } else {
++ /*
++ * Clear the failed state since we are about to drop the
++ * flush lock
++ */
++ if (lip->li_flags & XFS_LI_FAILED)
++ xfs_clear_li_failed(lip);
+ spin_unlock(&ailp->xa_lock);
++ }
+ }
+
+ /*
+diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
+index 2c7a1629e064..664dea105e76 100644
+--- a/fs/xfs/xfs_dquot_item.c
++++ b/fs/xfs/xfs_dquot_item.c
+@@ -137,6 +137,26 @@ xfs_qm_dqunpin_wait(
+ wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0));
+ }
+
++/*
++ * Callback used to mark a buffer with XFS_LI_FAILED when items in the buffer
++ * have been failed during writeback
++ *
++ * this informs the AIL that the dquot is already flush locked on the next push,
++ * and acquires a hold on the buffer to ensure that it isn't reclaimed before
++ * dirty data makes it to disk.
++ */
++STATIC void
++xfs_dquot_item_error(
++ struct xfs_log_item *lip,
++ struct xfs_buf *bp)
++{
++ struct xfs_dquot *dqp;
++
++ dqp = DQUOT_ITEM(lip)->qli_dquot;
++ ASSERT(!completion_done(&dqp->q_flush));
++ xfs_set_li_failed(lip, bp);
++}
++
+ STATIC uint
+ xfs_qm_dquot_logitem_push(
+ struct xfs_log_item *lip,
+@@ -144,13 +164,28 @@ xfs_qm_dquot_logitem_push(
+ __acquires(&lip->li_ailp->xa_lock)
+ {
+ struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot;
+- struct xfs_buf *bp = NULL;
++ struct xfs_buf *bp = lip->li_buf;
+ uint rval = XFS_ITEM_SUCCESS;
+ int error;
+
+ if (atomic_read(&dqp->q_pincount) > 0)
+ return XFS_ITEM_PINNED;
+
++ /*
++ * The buffer containing this item failed to be written back
++ * previously. Resubmit the buffer for IO
++ */
++ if (lip->li_flags & XFS_LI_FAILED) {
++ if (!xfs_buf_trylock(bp))
++ return XFS_ITEM_LOCKED;
++
++ if (!xfs_buf_resubmit_failed_buffers(bp, lip, buffer_list))
++ rval = XFS_ITEM_FLUSHING;
++
++ xfs_buf_unlock(bp);
++ return rval;
++ }
++
+ if (!xfs_dqlock_nowait(dqp))
+ return XFS_ITEM_LOCKED;
+
+@@ -242,7 +277,8 @@ static const struct xfs_item_ops xfs_dquot_item_ops = {
+ .iop_unlock = xfs_qm_dquot_logitem_unlock,
+ .iop_committed = xfs_qm_dquot_logitem_committed,
+ .iop_push = xfs_qm_dquot_logitem_push,
+- .iop_committing = xfs_qm_dquot_logitem_committing
++ .iop_committing = xfs_qm_dquot_logitem_committing,
++ .iop_error = xfs_dquot_item_error
+ };
+
+ /*
+diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
+index 98ca9f1b6a07..c5f2f1e3cc4b 100644
+--- a/fs/xfs/xfs_inode.c
++++ b/fs/xfs/xfs_inode.c
+@@ -2429,6 +2429,24 @@ xfs_ifree_cluster(
+ return 0;
+ }
+
++/*
++ * Free any local-format buffers sitting around before we reset to
++ * extents format.
++ */
++static inline void
++xfs_ifree_local_data(
++ struct xfs_inode *ip,
++ int whichfork)
++{
++ struct xfs_ifork *ifp;
++
++ if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
++ return;
++
++ ifp = XFS_IFORK_PTR(ip, whichfork);
++ xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
++}
++
+ /*
+ * This is called to return an inode to the inode free list.
+ * The inode should already be truncated to 0 length and have
+@@ -2466,6 +2484,9 @@ xfs_ifree(
+ if (error)
+ return error;
+
++ xfs_ifree_local_data(ip, XFS_DATA_FORK);
++ xfs_ifree_local_data(ip, XFS_ATTR_FORK);
++
+ VFS_I(ip)->i_mode = 0; /* mark incore inode as free */
+ ip->i_d.di_flags = 0;
+ ip->i_d.di_dmevmask = 0;
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index 25ed105bbcfb..20ee90c47cd5 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -737,6 +737,12 @@ enum {
+ MLX5_CAP_PORT_TYPE_ETH = 0x1,
+ };
+
++enum {
++ MLX5_CAP_UMR_FENCE_STRONG = 0x0,
++ MLX5_CAP_UMR_FENCE_SMALL = 0x1,
++ MLX5_CAP_UMR_FENCE_NONE = 0x2,
++};
++
+ struct mlx5_ifc_cmd_hca_cap_bits {
+ u8 reserved_at_0[0x80];
+
+@@ -838,7 +844,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
+ u8 striding_rq[0x1];
+ u8 reserved_at_201[0x2];
+ u8 ipoib_basic_offloads[0x1];
+- u8 reserved_at_205[0xa];
++ u8 reserved_at_205[0x5];
++ u8 umr_fence[0x2];
++ u8 reserved_at_20c[0x3];
+ u8 drain_sigerr[0x1];
+ u8 cmdif_checksum[0x2];
+ u8 sigerr_cqe[0x1];
+diff --git a/include/linux/tty.h b/include/linux/tty.h
+index 40144f382516..a41244fe58d0 100644
+--- a/include/linux/tty.h
++++ b/include/linux/tty.h
+@@ -394,6 +394,8 @@ extern struct tty_struct *get_current_tty(void);
+ /* tty_io.c */
+ extern int __init tty_init(void);
+ extern const char *tty_name(const struct tty_struct *tty);
++extern int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout);
++extern void tty_ldisc_unlock(struct tty_struct *tty);
+ #else
+ static inline void console_init(void)
+ { }
+diff --git a/mm/kmemleak.c b/mm/kmemleak.c
+index d1380ed93fdf..20cf3be9a5e8 100644
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -1442,6 +1442,8 @@ static void kmemleak_scan(void)
+ if (page_count(page) == 0)
+ continue;
+ scan_block(page, page + 1, NULL);
++ if (!(pfn % (MAX_SCAN_SIZE / sizeof(*page))))
++ cond_resched();
+ }
+ }
+ put_online_mems();
+diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
+index b747c9645e43..fed598a202c8 100644
+--- a/net/mac80211/mesh_hwmp.c
++++ b/net/mac80211/mesh_hwmp.c
+@@ -788,7 +788,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
+ struct mesh_path *mpath;
+ u8 ttl, flags, hopcount;
+ const u8 *orig_addr;
+- u32 orig_sn, metric, metric_txsta, interval;
++ u32 orig_sn, new_metric, orig_metric, last_hop_metric, interval;
+ bool root_is_gate;
+
+ ttl = rann->rann_ttl;
+@@ -799,7 +799,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
+ interval = le32_to_cpu(rann->rann_interval);
+ hopcount = rann->rann_hopcount;
+ hopcount++;
+- metric = le32_to_cpu(rann->rann_metric);
++ orig_metric = le32_to_cpu(rann->rann_metric);
+
+ /* Ignore our own RANNs */
+ if (ether_addr_equal(orig_addr, sdata->vif.addr))
+@@ -816,7 +816,10 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
+ return;
+ }
+
+- metric_txsta = airtime_link_metric_get(local, sta);
++ last_hop_metric = airtime_link_metric_get(local, sta);
++ new_metric = orig_metric + last_hop_metric;
++ if (new_metric < orig_metric)
++ new_metric = MAX_METRIC;
+
+ mpath = mesh_path_lookup(sdata, orig_addr);
+ if (!mpath) {
+@@ -829,7 +832,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
+ }
+
+ if (!(SN_LT(mpath->sn, orig_sn)) &&
+- !(mpath->sn == orig_sn && metric < mpath->rann_metric)) {
++ !(mpath->sn == orig_sn && new_metric < mpath->rann_metric)) {
+ rcu_read_unlock();
+ return;
+ }
+@@ -847,7 +850,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
+ }
+
+ mpath->sn = orig_sn;
+- mpath->rann_metric = metric + metric_txsta;
++ mpath->rann_metric = new_metric;
+ mpath->is_root = true;
+ /* Recording RANNs sender address to send individually
+ * addressed PREQs destined for root mesh STA */
+@@ -867,7 +870,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
+ mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
+ orig_sn, 0, NULL, 0, broadcast_addr,
+ hopcount, ttl, interval,
+- metric + metric_txsta, 0, sdata);
++ new_metric, 0, sdata);
+ }
+
+ rcu_read_unlock();
+diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
+index 07925418c2a5..1668916bdbde 100644
+--- a/net/openvswitch/flow_netlink.c
++++ b/net/openvswitch/flow_netlink.c
+@@ -1789,14 +1789,11 @@ int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb)
+
+ #define MAX_ACTIONS_BUFSIZE (32 * 1024)
+
+-static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log)
++static struct sw_flow_actions *nla_alloc_flow_actions(int size)
+ {
+ struct sw_flow_actions *sfa;
+
+- if (size > MAX_ACTIONS_BUFSIZE) {
+- OVS_NLERR(log, "Flow action size %u bytes exceeds max", size);
+- return ERR_PTR(-EINVAL);
+- }
++ WARN_ON_ONCE(size > MAX_ACTIONS_BUFSIZE);
+
+ sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
+ if (!sfa)
+@@ -1869,12 +1866,15 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
+ new_acts_size = ksize(*sfa) * 2;
+
+ if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
+- if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
++ if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
++ OVS_NLERR(log, "Flow action size exceeds max %u",
++ MAX_ACTIONS_BUFSIZE);
+ return ERR_PTR(-EMSGSIZE);
++ }
+ new_acts_size = MAX_ACTIONS_BUFSIZE;
+ }
+
+- acts = nla_alloc_flow_actions(new_acts_size, log);
++ acts = nla_alloc_flow_actions(new_acts_size);
+ if (IS_ERR(acts))
+ return (void *)acts;
+
+@@ -2500,7 +2500,7 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
+ {
+ int err;
+
+- *sfa = nla_alloc_flow_actions(nla_len(attr), log);
++ *sfa = nla_alloc_flow_actions(min(nla_len(attr), MAX_ACTIONS_BUFSIZE));
+ if (IS_ERR(*sfa))
+ return PTR_ERR(*sfa);
+
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index e01c825bc683..d24d14ea8ba4 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -2381,6 +2381,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
+ case -ECONNREFUSED:
+ case -ECONNRESET:
+ case -ENETUNREACH:
++ case -EHOSTUNREACH:
+ case -EADDRINUSE:
+ case -ENOBUFS:
+ /* retry with existing socket, after a delay */
+diff --git a/tools/gpio/gpio-event-mon.c b/tools/gpio/gpio-event-mon.c
+index 1c14c2595158..4b36323ea64b 100644
+--- a/tools/gpio/gpio-event-mon.c
++++ b/tools/gpio/gpio-event-mon.c
+@@ -23,6 +23,7 @@
+ #include <getopt.h>
+ #include <inttypes.h>
+ #include <sys/ioctl.h>
++#include <sys/types.h>
+ #include <linux/gpio.h>
+
+ int monitor_device(const char *device_name,
+diff --git a/tools/power/cpupower/bench/system.c b/tools/power/cpupower/bench/system.c
+index c25a74ae51ba..2bb3eef7d5c1 100644
+--- a/tools/power/cpupower/bench/system.c
++++ b/tools/power/cpupower/bench/system.c
+@@ -61,7 +61,7 @@ int set_cpufreq_governor(char *governor, unsigned int cpu)
+
+ dprintf("set %s as cpufreq governor\n", governor);
+
+- if (cpupower_is_cpu_online(cpu) != 0) {
++ if (cpupower_is_cpu_online(cpu) != 1) {
+ perror("cpufreq_cpu_exists");
+ fprintf(stderr, "error: cpu %u does not exist\n", cpu);
+ return -1;
+diff --git a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
+index 1b5da0066ebf..5b3205f16217 100644
+--- a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
++++ b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
+@@ -130,15 +130,18 @@ static struct cpuidle_monitor *cpuidle_register(void)
+ {
+ int num;
+ char *tmp;
++ int this_cpu;
++
++ this_cpu = sched_getcpu();
+
+ /* Assume idle state count is the same for all CPUs */
+- cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(0);
++ cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(this_cpu);
+
+ if (cpuidle_sysfs_monitor.hw_states_num <= 0)
+ return NULL;
+
+ for (num = 0; num < cpuidle_sysfs_monitor.hw_states_num; num++) {
+- tmp = cpuidle_state_name(0, num);
++ tmp = cpuidle_state_name(this_cpu, num);
+ if (tmp == NULL)
+ continue;
+
+@@ -146,7 +149,7 @@ static struct cpuidle_monitor *cpuidle_register(void)
+ strncpy(cpuidle_cstates[num].name, tmp, CSTATE_NAME_LEN - 1);
+ free(tmp);
+
+- tmp = cpuidle_state_desc(0, num);
++ tmp = cpuidle_state_desc(this_cpu, num);
+ if (tmp == NULL)
+ continue;
+ strncpy(cpuidle_cstates[num].desc, tmp, CSTATE_DESC_LEN - 1);
+diff --git a/tools/usb/usbip/src/usbip_bind.c b/tools/usb/usbip/src/usbip_bind.c
+index fa46141ae68b..e121cfb1746a 100644
+--- a/tools/usb/usbip/src/usbip_bind.c
++++ b/tools/usb/usbip/src/usbip_bind.c
+@@ -144,6 +144,7 @@ static int bind_device(char *busid)
+ int rc;
+ struct udev *udev;
+ struct udev_device *dev;
++ const char *devpath;
+
+ /* Check whether the device with this bus ID exists. */
+ udev = udev_new();
+@@ -152,8 +153,16 @@ static int bind_device(char *busid)
+ err("device with the specified bus ID does not exist");
+ return -1;
+ }
++ devpath = udev_device_get_devpath(dev);
+ udev_unref(udev);
+
++ /* If the device is already attached to vhci_hcd - bail out */
++ if (strstr(devpath, USBIP_VHCI_DRV_NAME)) {
++ err("bind loop detected: device: %s is attached to %s\n",
++ devpath, USBIP_VHCI_DRV_NAME);
++ return -1;
++ }
++
+ rc = unbind_other(busid);
+ if (rc == UNBIND_ST_FAILED) {
+ err("could not unbind driver from device on busid %s", busid);
+diff --git a/tools/usb/usbip/src/usbip_list.c b/tools/usb/usbip/src/usbip_list.c
+index f1b38e866dd7..d65a9f444174 100644
+--- a/tools/usb/usbip/src/usbip_list.c
++++ b/tools/usb/usbip/src/usbip_list.c
+@@ -187,6 +187,7 @@ static int list_devices(bool parsable)
+ const char *busid;
+ char product_name[128];
+ int ret = -1;
++ const char *devpath;
+
+ /* Create libudev context. */
+ udev = udev_new();
+@@ -209,6 +210,14 @@ static int list_devices(bool parsable)
+ path = udev_list_entry_get_name(dev_list_entry);
+ dev = udev_device_new_from_syspath(udev, path);
+
++ /* Ignore devices attached to vhci_hcd */
++ devpath = udev_device_get_devpath(dev);
++ if (strstr(devpath, USBIP_VHCI_DRV_NAME)) {
++ dbg("Skip the device %s already attached to %s\n",
++ devpath, USBIP_VHCI_DRV_NAME);
++ continue;
++ }
++
+ /* Get device information. */
+ idVendor = udev_device_get_sysattr_value(dev, "idVendor");
+ idProduct = udev_device_get_sysattr_value(dev, "idProduct");