author    Mike Pagano <mpagano@gentoo.org>    2016-03-17 18:50:28 -0400
committer Mike Pagano <mpagano@gentoo.org>    2016-03-17 18:50:28 -0400
commit    17dfbe391ee8af34cdd598d8871504327cf9c210 (patch)
tree      6ec94f984605e87dcc394055c97b51b5781bd05a
parent    Linux patch 3.18.28 (diff)
download  linux-patches-17dfbe39.tar.gz
          linux-patches-17dfbe39.tar.bz2
          linux-patches-17dfbe39.zip
Linux patch 3.18.29 (3.18-30)
-rw-r--r--  0000_README                 4
-rw-r--r--  1028_linux-3.18.29.patch    3850
2 files changed, 3854 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 124258e8..51c140d2 100644
--- a/0000_README
+++ b/0000_README
@@ -155,6 +155,10 @@ Patch: 1027_linux-3.18.28.patch
From: http://www.kernel.org
Desc: Linux 3.18.28
+Patch: 1028_linux-3.18.29.patch
+From: http://www.kernel.org
+Desc: Linux 3.18.29
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1028_linux-3.18.29.patch b/1028_linux-3.18.29.patch
new file mode 100644
index 00000000..c935dc3f
--- /dev/null
+++ b/1028_linux-3.18.29.patch
@@ -0,0 +1,3850 @@
+diff --git a/Makefile b/Makefile
+index f849f29ce405..13063ba47412 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 18
+-SUBLEVEL = 28
++SUBLEVEL = 29
+ EXTRAVERSION =
+ NAME = Diseased Newt
+
+diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
+index 8c97208b9b97..be68848071fd 100644
+--- a/arch/arm/kvm/guest.c
++++ b/arch/arm/kvm/guest.c
+@@ -173,7 +173,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+ u64 val;
+
+ val = kvm_arm_timer_get_reg(vcpu, reg->id);
+- return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
++ return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
+ }
+
+ static unsigned long num_core_regs(void)
+diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
+index 2d6b6065fe7f..23e35de789f7 100644
+--- a/arch/arm64/kernel/suspend.c
++++ b/arch/arm64/kernel/suspend.c
+@@ -1,3 +1,4 @@
++#include <linux/ftrace.h>
+ #include <linux/percpu.h>
+ #include <linux/slab.h>
+ #include <asm/cacheflush.h>
+@@ -92,6 +93,13 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+ local_dbg_save(flags);
+
+ /*
++ * Function graph tracer state gets inconsistent when the kernel
++ * calls functions that never return (aka suspend finishers) hence
++ * disable graph tracing during their execution.
++ */
++ pause_graph_tracing();
++
++ /*
+ * mm context saved on the stack, it will be restored when
+ * the cpu comes out of reset through the identity mapped
+ * page tables, so that the thread address space is properly
+@@ -128,6 +136,8 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+ hw_breakpoint_restore(NULL);
+ }
+
++ unpause_graph_tracing();
++
+ /*
+ * Restore pstate flags. OS lock and mdscr have been already
+ * restored, so from this point onwards, debugging is fully
+diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
+index 84d5959ff874..61ec16bd528b 100644
+--- a/arch/arm64/kvm/guest.c
++++ b/arch/arm64/kvm/guest.c
+@@ -184,7 +184,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+ u64 val;
+
+ val = kvm_arm_timer_get_reg(vcpu, reg->id);
+- return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
++ return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
+ }
+
+ /**
+diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
+index f506c53cb4f5..2012a5a3055b 100644
+--- a/arch/mips/kernel/traps.c
++++ b/arch/mips/kernel/traps.c
+@@ -692,15 +692,15 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
+ asmlinkage void do_ov(struct pt_regs *regs)
+ {
+ enum ctx_state prev_state;
+- siginfo_t info;
++ siginfo_t info = {
++ .si_signo = SIGFPE,
++ .si_code = FPE_INTOVF,
++ .si_addr = (void __user *)regs->cp0_epc,
++ };
+
+ prev_state = exception_enter();
+ die_if_kernel("Integer overflow", regs);
+
+- info.si_code = FPE_INTOVF;
+- info.si_signo = SIGFPE;
+- info.si_errno = 0;
+- info.si_addr = (void __user *) regs->cp0_epc;
+ force_sig_info(SIGFPE, &info, current);
+ exception_exit(prev_state);
+ }
+@@ -803,7 +803,7 @@ out:
+ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
+ const char *str)
+ {
+- siginfo_t info;
++ siginfo_t info = { 0 };
+ char b[40];
+
+ #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
+@@ -831,7 +831,6 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
+ else
+ info.si_code = FPE_INTOVF;
+ info.si_signo = SIGFPE;
+- info.si_errno = 0;
+ info.si_addr = (void __user *) regs->cp0_epc;
+ force_sig_info(SIGFPE, &info, current);
+ break;
+diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
+index 14c3e80e003a..eb95315109e0 100644
+--- a/arch/s390/kernel/compat_signal.c
++++ b/arch/s390/kernel/compat_signal.c
+@@ -293,7 +293,7 @@ static int restore_sigregs_ext32(struct pt_regs *regs,
+
+ /* Restore high gprs from signal stack */
+ if (__copy_from_user(&gprs_high, &sregs_ext->gprs_high,
+- sizeof(&sregs_ext->gprs_high)))
++ sizeof(sregs_ext->gprs_high)))
+ return -EFAULT;
+ for (i = 0; i < NUM_GPRS; i++)
+ *(__u32 *)&regs->gprs[i] = gprs_high[i];
+diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
+index ffe71228fc10..a39e89eaa763 100644
+--- a/arch/x86/ia32/ia32entry.S
++++ b/arch/x86/ia32/ia32entry.S
+@@ -422,6 +422,7 @@ ENTRY(ia32_syscall)
+ /*CFI_REL_OFFSET cs,CS-RIP*/
+ CFI_REL_OFFSET rip,RIP-RIP
+ PARAVIRT_ADJUST_EXCEPTION_FRAME
++ ASM_CLAC /* Do this early to minimize exposure */
+ SWAPGS
+ /*
+ * No need to follow this irqs on/off section: the syscall
+diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
+index d1daead5fcdd..adb3eaf8fe2a 100644
+--- a/arch/x86/kernel/acpi/sleep.c
++++ b/arch/x86/kernel/acpi/sleep.c
+@@ -16,6 +16,7 @@
+ #include <asm/cacheflush.h>
+ #include <asm/realmode.h>
+
++#include <linux/ftrace.h>
+ #include "../../realmode/rm/wakeup.h"
+ #include "sleep.h"
+
+@@ -107,7 +108,13 @@ int x86_acpi_suspend_lowlevel(void)
+ saved_magic = 0x123456789abcdef0L;
+ #endif /* CONFIG_64BIT */
+
++ /*
++ * Pause/unpause graph tracing around do_suspend_lowlevel as it has
++ * inconsistent call/return info after it jumps to the wakeup vector.
++ */
++ pause_graph_tracing();
+ do_suspend_lowlevel();
++ unpause_graph_tracing();
+ return 0;
+ }
+
+diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
+index 6e6d115fe9b5..d537c9badeb6 100644
+--- a/arch/x86/kvm/paging_tmpl.h
++++ b/arch/x86/kvm/paging_tmpl.h
+@@ -257,7 +257,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
+ return ret;
+
+ mark_page_dirty(vcpu->kvm, table_gfn);
+- walker->ptes[level] = pte;
++ walker->ptes[level - 1] = pte;
+ }
+ return 0;
+ }
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 0bb431c3f74e..9fbf7c7fcbd9 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -2065,6 +2065,8 @@ static void accumulate_steal_time(struct kvm_vcpu *vcpu)
+
+ static void record_steal_time(struct kvm_vcpu *vcpu)
+ {
++ accumulate_steal_time(vcpu);
++
+ if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
+ return;
+
+@@ -2198,12 +2200,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ if (!(data & KVM_MSR_ENABLED))
+ break;
+
+- vcpu->arch.st.last_steal = current->sched_info.run_delay;
+-
+- preempt_disable();
+- accumulate_steal_time(vcpu);
+- preempt_enable();
+-
+ kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
+
+ break;
+@@ -2903,7 +2899,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ vcpu->cpu = cpu;
+ }
+
+- accumulate_steal_time(vcpu);
+ kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
+ }
+
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 62dcd80ec2c0..f96e4d4f5319 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -363,6 +363,22 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
+ { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
+ { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
++ { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
++ { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/
++ { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
++ { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Lewisburg RAID*/
++ { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
++ { PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
++ { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
++ { PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
++ { PCI_VDEVICE(INTEL, 0xa1d2), board_ahci }, /* Lewisburg RAID*/
++ { PCI_VDEVICE(INTEL, 0xa1d6), board_ahci }, /* Lewisburg RAID*/
++ { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
++ { PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
++ { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
++ { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
++ { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
++ { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
+
+ /* JMicron 360/1/3/5/6, match class to avoid IDE function */
+ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 78d56c9390cc..f96be389cf98 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -674,19 +674,18 @@ static int ata_ioc32(struct ata_port *ap)
+ int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
+ int cmd, void __user *arg)
+ {
+- int val = -EINVAL, rc = -EINVAL;
++ unsigned long val;
++ int rc = -EINVAL;
+ unsigned long flags;
+
+ switch (cmd) {
+- case ATA_IOC_GET_IO32:
++ case HDIO_GET_32BIT:
+ spin_lock_irqsave(ap->lock, flags);
+ val = ata_ioc32(ap);
+ spin_unlock_irqrestore(ap->lock, flags);
+- if (copy_to_user(arg, &val, 1))
+- return -EFAULT;
+- return 0;
++ return put_user(val, (unsigned long __user *)arg);
+
+- case ATA_IOC_SET_IO32:
++ case HDIO_SET_32BIT:
+ val = (unsigned long) arg;
+ rc = 0;
+ spin_lock_irqsave(ap->lock, flags);
+diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
+index 035dacc93382..fd5c5f3370f6 100644
+--- a/drivers/gpu/drm/ast/ast_main.c
++++ b/drivers/gpu/drm/ast/ast_main.c
+@@ -227,7 +227,7 @@ static int ast_get_dram_info(struct drm_device *dev)
+ } while (ast_read32(ast, 0x10000) != 0x01);
+ data = ast_read32(ast, 0x10004);
+
+- if (data & 0x400)
++ if (data & 0x40)
+ ast->dram_bus_width = 16;
+ else
+ ast->dram_bus_width = 32;
+diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
+index fa537c0602e8..d5ea5c106dd1 100644
+--- a/drivers/gpu/drm/radeon/radeon_pm.c
++++ b/drivers/gpu/drm/radeon/radeon_pm.c
+@@ -936,12 +936,6 @@ force:
+
+ /* update display watermarks based on new power state */
+ radeon_bandwidth_update(rdev);
+- /* update displays */
+- radeon_dpm_display_configuration_changed(rdev);
+-
+- rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
+- rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
+- rdev->pm.dpm.single_display = single_display;
+
+ /* wait for the rings to drain */
+ for (i = 0; i < RADEON_NUM_RINGS; i++) {
+@@ -958,6 +952,13 @@ force:
+
+ radeon_dpm_post_set_power_state(rdev);
+
++ /* update displays */
++ radeon_dpm_display_configuration_changed(rdev);
++
++ rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
++ rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
++ rdev->pm.dpm.single_display = single_display;
++
+ if (rdev->asic->dpm.force_performance_level) {
+ if (rdev->pm.dpm.thermal_active) {
+ enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
+diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
+index 97342ebc7de7..e318980f8d45 100644
+--- a/drivers/hid/usbhid/hid-core.c
++++ b/drivers/hid/usbhid/hid-core.c
+@@ -476,8 +476,6 @@ static void hid_ctrl(struct urb *urb)
+ struct usbhid_device *usbhid = hid->driver_data;
+ int unplug = 0, status = urb->status;
+
+- spin_lock(&usbhid->lock);
+-
+ switch (status) {
+ case 0: /* success */
+ if (usbhid->ctrl[usbhid->ctrltail].dir == USB_DIR_IN)
+@@ -497,6 +495,8 @@ static void hid_ctrl(struct urb *urb)
+ hid_warn(urb->dev, "ctrl urb status %d received\n", status);
+ }
+
++ spin_lock(&usbhid->lock);
++
+ if (unplug) {
+ usbhid->ctrltail = usbhid->ctrlhead;
+ } else {
+diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
+index f155b8380481..2b3105c8aed3 100644
+--- a/drivers/hwmon/ads1015.c
++++ b/drivers/hwmon/ads1015.c
+@@ -126,7 +126,7 @@ static int ads1015_reg_to_mv(struct i2c_client *client, unsigned int channel,
+ struct ads1015_data *data = i2c_get_clientdata(client);
+ unsigned int pga = data->channel_data[channel].pga;
+ int fullscale = fullscale_table[pga];
+- const unsigned mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
++ const int mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
+
+ return DIV_ROUND_CLOSEST(reg * fullscale, mask);
+ }
+diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
+index b0522f15730f..2f3475247f0f 100644
+--- a/drivers/iommu/amd_iommu_init.c
++++ b/drivers/iommu/amd_iommu_init.c
+@@ -227,6 +227,10 @@ static enum iommu_init_state init_state = IOMMU_START_STATE;
+ static int amd_iommu_enable_interrupts(void);
+ static int __init iommu_go_to_state(enum iommu_init_state state);
+
++static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
++ u8 bank, u8 cntr, u8 fxn,
++ u64 *value, bool is_write);
++
+ static inline void update_last_devid(u16 devid)
+ {
+ if (devid > amd_iommu_last_bdf)
+@@ -1066,6 +1070,34 @@ static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
+ }
+
+ /*
++ * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
++ * Workaround:
++ * BIOS should enable ATS write permission check by setting
++ * L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
++ */
++static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
++{
++ u32 value;
++
++ if ((boot_cpu_data.x86 != 0x15) ||
++ (boot_cpu_data.x86_model < 0x30) ||
++ (boot_cpu_data.x86_model > 0x3f))
++ return;
++
++ /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
++ value = iommu_read_l2(iommu, 0x47);
++
++ if (value & BIT(0))
++ return;
++
++ /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
++ iommu_write_l2(iommu, 0x47, value | BIT(0));
++
++ pr_info("AMD-Vi: Applying ATS write check workaround for IOMMU at %s\n",
++ dev_name(&iommu->dev->dev));
++}
++
++/*
+ * This function clues the initialization function for one IOMMU
+ * together and also allocates the command buffer and programs the
+ * hardware. It does NOT enable the IOMMU. This is done afterwards.
+@@ -1192,8 +1224,8 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu)
+ amd_iommu_pc_present = true;
+
+ /* Check if the performance counters can be written to */
+- if ((0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val, true)) ||
+- (0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val2, false)) ||
++ if ((0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val, true)) ||
++ (0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val2, false)) ||
+ (val != val2)) {
+ pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
+ amd_iommu_pc_present = false;
+@@ -1339,6 +1371,7 @@ static int iommu_init_pci(struct amd_iommu *iommu)
+ }
+
+ amd_iommu_erratum_746_workaround(iommu);
++ amd_iommu_ats_write_check_workaround(iommu);
+
+ iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu,
+ amd_iommu_groups, "ivhd%d",
+@@ -2363,22 +2396,15 @@ u8 amd_iommu_pc_get_max_counters(u16 devid)
+ }
+ EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
+
+-int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
++static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
++ u8 bank, u8 cntr, u8 fxn,
+ u64 *value, bool is_write)
+ {
+- struct amd_iommu *iommu;
+ u32 offset;
+ u32 max_offset_lim;
+
+- /* Make sure the IOMMU PC resource is available */
+- if (!amd_iommu_pc_present)
+- return -ENODEV;
+-
+- /* Locate the iommu associated with the device ID */
+- iommu = amd_iommu_rlookup_table[devid];
+-
+ /* Check for valid iommu and pc register indexing */
+- if (WARN_ON((iommu == NULL) || (fxn > 0x28) || (fxn & 7)))
++ if (WARN_ON((fxn > 0x28) || (fxn & 7)))
+ return -ENODEV;
+
+ offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn);
+@@ -2402,3 +2428,16 @@ int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
+ return 0;
+ }
+ EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val);
++
++int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
++ u64 *value, bool is_write)
++{
++ struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
++
++ /* Make sure the IOMMU PC resource is available */
++ if (!amd_iommu_pc_present || iommu == NULL)
++ return -ENODEV;
++
++ return iommu_pc_get_set_reg_val(iommu, bank, cntr, fxn,
++ value, is_write);
++}
+diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
+index 47795ff71688..12cb0dee1721 100644
+--- a/drivers/media/i2c/adv7604.c
++++ b/drivers/media/i2c/adv7604.c
+@@ -1981,10 +1981,9 @@ static int adv7604_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
+ }
+
+ /* tx 5v detect */
+- tx_5v = io_read(sd, 0x70) & info->cable_det_mask;
++ tx_5v = irq_reg_0x70 & info->cable_det_mask;
+ if (tx_5v) {
+ v4l2_dbg(1, debug, sd, "%s: tx_5v: 0x%x\n", __func__, tx_5v);
+- io_write(sd, 0x71, tx_5v);
+ adv7604_s_detect_tx_5v_ctrl(sd);
+ if (handled)
+ *handled = true;
+diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
+index 2a1b6e037e1a..0134ba32a057 100644
+--- a/drivers/mtd/ubi/upd.c
++++ b/drivers/mtd/ubi/upd.c
+@@ -193,7 +193,7 @@ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
+ vol->changing_leb = 1;
+ vol->ch_lnum = req->lnum;
+
+- vol->upd_buf = vmalloc(req->bytes);
++ vol->upd_buf = vmalloc(ALIGN((int)req->bytes, ubi->min_io_size));
+ if (!vol->upd_buf)
+ return -ENOMEM;
+
+diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
+index 29d3f0938eb8..5c9f06cc35cc 100644
+--- a/drivers/net/can/usb/ems_usb.c
++++ b/drivers/net/can/usb/ems_usb.c
+@@ -117,6 +117,9 @@ MODULE_LICENSE("GPL v2");
+ */
+ #define EMS_USB_ARM7_CLOCK 8000000
+
++#define CPC_TX_QUEUE_TRIGGER_LOW 25
++#define CPC_TX_QUEUE_TRIGGER_HIGH 35
++
+ /*
+ * CAN-Message representation in a CPC_MSG. Message object type is
+ * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or
+@@ -278,6 +281,11 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
+ switch (urb->status) {
+ case 0:
+ dev->free_slots = dev->intr_in_buffer[1];
++		if (dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH) {
++			if (netif_queue_stopped(netdev)) {
++				netif_wake_queue(netdev);
++			}
++		}
+ break;
+
+ case -ECONNRESET: /* unlink */
+@@ -528,8 +536,6 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
+ /* Release context */
+ context->echo_index = MAX_TX_URBS;
+
+- if (netif_queue_stopped(netdev))
+- netif_wake_queue(netdev);
+ }
+
+ /*
+@@ -589,7 +595,7 @@ static int ems_usb_start(struct ems_usb *dev)
+ int err, i;
+
+ dev->intr_in_buffer[0] = 0;
+- dev->free_slots = 15; /* initial size */
++ dev->free_slots = 50; /* initial size */
+
+ for (i = 0; i < MAX_RX_URBS; i++) {
+ struct urb *urb = NULL;
+@@ -840,7 +846,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
+
+ /* Slow down tx path */
+ if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
+- dev->free_slots < 5) {
++ dev->free_slots < CPC_TX_QUEUE_TRIGGER_LOW) {
+ netif_stop_queue(netdev);
+ }
+ }
+diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
+index 1560f4576c7d..90011b13fc9c 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
++++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
+@@ -368,4 +368,10 @@ enum secure_load_status_reg {
+ #define MON_BUFF_WRPTR (0xa03c44)
+ #define MON_BUFF_CYCLE_CNT (0xa03c48)
+
++/* FW chicken bits */
++#define LMPM_CHICK 0xA01FF8
++enum {
++ LMPM_CHICK_EXTENDED_ADDR_SPACE = BIT(0),
++};
++
+ #endif /* __iwl_prph_h__ */
+diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+index f71c22f8beea..1e2b5af502b8 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+@@ -2144,6 +2144,10 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
++ /* we don't support "match all" in the firmware */
++ if (!req->n_match_sets)
++ return -EOPNOTSUPP;
++
+ ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_OS);
+ if (ret)
+ return ret;
+diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
+index c943baa4e9f1..9dcb04f221fd 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
+@@ -79,6 +79,10 @@
+ #include "iwl-fw-error-dump.h"
+ #include "internal.h"
+
++/* extended range in FW SRAM */
++#define IWL_FW_MEM_EXTENDED_START 0x40000
++#define IWL_FW_MEM_EXTENDED_END 0x57FFF
++
+ static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
+ {
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+@@ -625,14 +629,28 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
+ }
+
+ for (offset = 0; offset < section->len; offset += chunk_sz) {
+- u32 copy_size;
++ u32 copy_size, dst_addr;
++ bool extended_addr = false;
+
+ copy_size = min_t(u32, chunk_sz, section->len - offset);
++ dst_addr = section->offset + offset;
++
++ if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
++ dst_addr <= IWL_FW_MEM_EXTENDED_END)
++ extended_addr = true;
++
++ if (extended_addr)
++ iwl_set_bits_prph(trans, LMPM_CHICK,
++ LMPM_CHICK_EXTENDED_ADDR_SPACE);
+
+ memcpy(v_addr, (u8 *)section->data + offset, copy_size);
+- ret = iwl_pcie_load_firmware_chunk(trans,
+- section->offset + offset,
+- p_addr, copy_size);
++ ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
++ copy_size);
++
++ if (extended_addr)
++ iwl_clear_bits_prph(trans, LMPM_CHICK,
++ LMPM_CHICK_EXTENDED_ADDR_SPACE);
++
+ if (ret) {
+ IWL_ERR(trans,
+ "Could not load the [%d] uCode section\n",
+diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
+index 116ca3746adb..bce77100eee9 100644
+--- a/drivers/pci/xen-pcifront.c
++++ b/drivers/pci/xen-pcifront.c
+@@ -52,7 +52,7 @@ struct pcifront_device {
+ };
+
+ struct pcifront_sd {
+- int domain;
++ struct pci_sysdata sd;
+ struct pcifront_device *pdev;
+ };
+
+@@ -66,7 +66,9 @@ static inline void pcifront_init_sd(struct pcifront_sd *sd,
+ unsigned int domain, unsigned int bus,
+ struct pcifront_device *pdev)
+ {
+- sd->domain = domain;
++ /* Because we do not expose that information via XenBus. */
++ sd->sd.node = first_online_node;
++ sd->sd.domain = domain;
+ sd->pdev = pdev;
+ }
+
+@@ -464,8 +466,8 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
+ dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
+ domain, bus);
+
+- bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
+- sd = kmalloc(sizeof(*sd), GFP_KERNEL);
++ bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL);
++ sd = kzalloc(sizeof(*sd), GFP_KERNEL);
+ if (!bus_entry || !sd) {
+ err = -ENOMEM;
+ goto err_out;
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 958c732c428e..0b1fdfe34c98 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -4054,6 +4054,17 @@ reject:
+ return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
+ }
+
++static bool iscsi_target_check_conn_state(struct iscsi_conn *conn)
++{
++ bool ret;
++
++ spin_lock_bh(&conn->state_lock);
++ ret = (conn->conn_state != TARG_CONN_STATE_LOGGED_IN);
++ spin_unlock_bh(&conn->state_lock);
++
++ return ret;
++}
++
+ int iscsi_target_rx_thread(void *arg)
+ {
+ int ret, rc;
+@@ -4071,7 +4082,7 @@ int iscsi_target_rx_thread(void *arg)
+ * incoming iscsi/tcp socket I/O, and/or failing the connection.
+ */
+ rc = wait_for_completion_interruptible(&conn->rx_login_comp);
+- if (rc < 0)
++ if (rc < 0 || iscsi_target_check_conn_state(conn))
+ return 0;
+
+ if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
+diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
+index 092112e5e1a6..dcb63481325f 100644
+--- a/drivers/target/iscsi/iscsi_target_nego.c
++++ b/drivers/target/iscsi/iscsi_target_nego.c
+@@ -393,6 +393,7 @@ err:
+ if (login->login_complete) {
+ if (conn->rx_thread && conn->rx_thread_active) {
+ send_sig(SIGINT, conn->rx_thread, 1);
++ complete(&conn->rx_login_comp);
+ kthread_stop(conn->rx_thread);
+ }
+ if (conn->tx_thread && conn->tx_thread_active) {
+diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
+index fa89c2f105e6..496133a4be4f 100644
+--- a/drivers/target/target_core_sbc.c
++++ b/drivers/target/target_core_sbc.c
+@@ -299,7 +299,8 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
+ return 0;
+ }
+
+-static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success)
++static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
++ int *post_ret)
+ {
+ unsigned char *buf, *addr;
+ struct scatterlist *sg;
+@@ -363,7 +364,8 @@ sbc_execute_rw(struct se_cmd *cmd)
+ cmd->data_direction);
+ }
+
+-static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
++static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
++ int *post_ret)
+ {
+ struct se_device *dev = cmd->se_dev;
+
+@@ -373,8 +375,10 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
+ * sent to the backend driver.
+ */
+ spin_lock_irq(&cmd->t_state_lock);
+- if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
++ if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) {
+ cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
++ *post_ret = 1;
++ }
+ spin_unlock_irq(&cmd->t_state_lock);
+
+ /*
+@@ -386,7 +390,8 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
+ return TCM_NO_SENSE;
+ }
+
+-static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success)
++static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
++ int *post_ret)
+ {
+ struct se_device *dev = cmd->se_dev;
+ struct scatterlist *write_sg = NULL, *sg;
+@@ -482,11 +487,11 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
+
+ if (block_size < PAGE_SIZE) {
+ sg_set_page(&write_sg[i], m.page, block_size,
+- block_size);
++ m.piter.sg->offset + block_size);
+ } else {
+ sg_miter_next(&m);
+ sg_set_page(&write_sg[i], m.page, block_size,
+- 0);
++ m.piter.sg->offset);
+ }
+ len -= block_size;
+ i++;
+diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
+index 383074723fde..a5c2b3cf7d1a 100644
+--- a/drivers/target/target_core_tmr.c
++++ b/drivers/target/target_core_tmr.c
+@@ -78,16 +78,18 @@ void core_tmr_release_req(struct se_tmr_req *tmr)
+ kfree(tmr);
+ }
+
+-static void core_tmr_handle_tas_abort(
+- struct se_node_acl *tmr_nacl,
+- struct se_cmd *cmd,
+- int tas)
++static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
+ {
+- bool remove = true;
++ unsigned long flags;
++ bool remove = true, send_tas;
+ /*
+ * TASK ABORTED status (TAS) bit support
+ */
+- if ((tmr_nacl && (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) {
++ spin_lock_irqsave(&cmd->t_state_lock, flags);
++ send_tas = (cmd->transport_state & CMD_T_TAS);
++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++
++ if (send_tas) {
+ remove = false;
+ transport_send_task_abort(cmd);
+ }
+@@ -110,6 +112,47 @@ static int target_check_cdb_and_preempt(struct list_head *list,
+ return 1;
+ }
+
++static bool __target_check_io_state(struct se_cmd *se_cmd,
++ struct se_session *tmr_sess, int tas)
++{
++ struct se_session *sess = se_cmd->se_sess;
++
++ assert_spin_locked(&sess->sess_cmd_lock);
++ WARN_ON_ONCE(!irqs_disabled());
++ /*
++ * If command already reached CMD_T_COMPLETE state within
++ * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown,
++ * this se_cmd has been passed to fabric driver and will
++ * not be aborted.
++ *
++ * Otherwise, obtain a local se_cmd->cmd_kref now for TMR
++ * ABORT_TASK + LUN_RESET for CMD_T_ABORTED processing as
++ * long as se_cmd->cmd_kref is still active unless zero.
++ */
++ spin_lock(&se_cmd->t_state_lock);
++ if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) {
++ pr_debug("Attempted to abort io tag: %u already complete or"
++ " fabric stop, skipping\n",
++ se_cmd->se_tfo->get_task_tag(se_cmd));
++ spin_unlock(&se_cmd->t_state_lock);
++ return false;
++ }
++ if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
++ pr_debug("Attempted to abort io tag: %u already shutdown,"
++ " skipping\n", se_cmd->se_tfo->get_task_tag(se_cmd));
++ spin_unlock(&se_cmd->t_state_lock);
++ return false;
++ }
++ se_cmd->transport_state |= CMD_T_ABORTED;
++
++ if ((tmr_sess != se_cmd->se_sess) && tas)
++ se_cmd->transport_state |= CMD_T_TAS;
++
++ spin_unlock(&se_cmd->t_state_lock);
++
++ return kref_get_unless_zero(&se_cmd->cmd_kref);
++}
++
+ void core_tmr_abort_task(
+ struct se_device *dev,
+ struct se_tmr_req *tmr,
+@@ -136,25 +179,20 @@ void core_tmr_abort_task(
+ printk("ABORT_TASK: Found referenced %s task_tag: %u\n",
+ se_cmd->se_tfo->get_fabric_name(), ref_tag);
+
+- spin_lock(&se_cmd->t_state_lock);
+- if (se_cmd->transport_state & CMD_T_COMPLETE) {
+- printk("ABORT_TASK: ref_tag: %u already complete, skipping\n", ref_tag);
+- spin_unlock(&se_cmd->t_state_lock);
++ if (!__target_check_io_state(se_cmd, se_sess, 0)) {
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
++ target_put_sess_cmd(se_cmd);
+ goto out;
+ }
+- se_cmd->transport_state |= CMD_T_ABORTED;
+- spin_unlock(&se_cmd->t_state_lock);
+
+ list_del_init(&se_cmd->se_cmd_list);
+- kref_get(&se_cmd->cmd_kref);
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+
+ cancel_work_sync(&se_cmd->work);
+ transport_wait_for_tasks(se_cmd);
+
+- target_put_sess_cmd(se_cmd);
+ transport_cmd_finish_abort(se_cmd, true);
++ target_put_sess_cmd(se_cmd);
+
+ printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
+ " ref_tag: %d\n", ref_tag);
+@@ -211,7 +249,8 @@ static void core_tmr_drain_tmr_list(
+
+ spin_lock(&sess->sess_cmd_lock);
+ spin_lock(&cmd->t_state_lock);
+- if (!(cmd->transport_state & CMD_T_ACTIVE)) {
++ if (!(cmd->transport_state & CMD_T_ACTIVE) ||
++ (cmd->transport_state & CMD_T_FABRIC_STOP)) {
+ spin_unlock(&cmd->t_state_lock);
+ spin_unlock(&sess->sess_cmd_lock);
+ continue;
+@@ -221,15 +260,22 @@ static void core_tmr_drain_tmr_list(
+ spin_unlock(&sess->sess_cmd_lock);
+ continue;
+ }
++ if (sess->sess_tearing_down || cmd->cmd_wait_set) {
++ spin_unlock(&cmd->t_state_lock);
++ spin_unlock(&sess->sess_cmd_lock);
++ continue;
++ }
+ cmd->transport_state |= CMD_T_ABORTED;
+ spin_unlock(&cmd->t_state_lock);
+
+ rc = kref_get_unless_zero(&cmd->cmd_kref);
+- spin_unlock(&sess->sess_cmd_lock);
+ if (!rc) {
+ printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n");
++ spin_unlock(&sess->sess_cmd_lock);
+ continue;
+ }
++ spin_unlock(&sess->sess_cmd_lock);
++
+ list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
+ }
+ spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
+@@ -254,13 +300,15 @@ static void core_tmr_drain_tmr_list(
+ static void core_tmr_drain_state_list(
+ struct se_device *dev,
+ struct se_cmd *prout_cmd,
+- struct se_node_acl *tmr_nacl,
++ struct se_session *tmr_sess,
+ int tas,
+ struct list_head *preempt_and_abort_list)
+ {
+ LIST_HEAD(drain_task_list);
++ struct se_session *sess;
+ struct se_cmd *cmd, *next;
+ unsigned long flags;
++ int rc;
+
+ /*
+ * Complete outstanding commands with TASK_ABORTED SAM status.
+@@ -299,6 +347,16 @@ static void core_tmr_drain_state_list(
+ if (prout_cmd == cmd)
+ continue;
+
++ sess = cmd->se_sess;
++ if (WARN_ON_ONCE(!sess))
++ continue;
++
++ spin_lock(&sess->sess_cmd_lock);
++ rc = __target_check_io_state(cmd, tmr_sess, tas);
++ spin_unlock(&sess->sess_cmd_lock);
++ if (!rc)
++ continue;
++
+ list_move_tail(&cmd->state_list, &drain_task_list);
+ cmd->state_active = false;
+ }
+@@ -306,7 +364,7 @@ static void core_tmr_drain_state_list(
+
+ while (!list_empty(&drain_task_list)) {
+ cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
+- list_del(&cmd->state_list);
++ list_del_init(&cmd->state_list);
+
+ pr_debug("LUN_RESET: %s cmd: %p"
+ " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state: %d"
+@@ -330,16 +388,11 @@ static void core_tmr_drain_state_list(
+ * loop above, but we do it down here given that
+ * cancel_work_sync may block.
+ */
+- if (cmd->t_state == TRANSPORT_COMPLETE)
+- cancel_work_sync(&cmd->work);
+-
+- spin_lock_irqsave(&cmd->t_state_lock, flags);
+- target_stop_cmd(cmd, &flags);
+-
+- cmd->transport_state |= CMD_T_ABORTED;
+- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++ cancel_work_sync(&cmd->work);
++ transport_wait_for_tasks(cmd);
+
+- core_tmr_handle_tas_abort(tmr_nacl, cmd, tas);
++ core_tmr_handle_tas_abort(cmd, tas);
++ target_put_sess_cmd(cmd);
+ }
+ }
+
+@@ -351,6 +404,7 @@ int core_tmr_lun_reset(
+ {
+ struct se_node_acl *tmr_nacl = NULL;
+ struct se_portal_group *tmr_tpg = NULL;
++ struct se_session *tmr_sess = NULL;
+ int tas;
+ /*
+ * TASK_ABORTED status bit, this is configurable via ConfigFS
+@@ -369,8 +423,9 @@ int core_tmr_lun_reset(
+ * or struct se_device passthrough..
+ */
+ if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
+- tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
+- tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
++ tmr_sess = tmr->task_cmd->se_sess;
++ tmr_nacl = tmr_sess->se_node_acl;
++ tmr_tpg = tmr_sess->se_tpg;
+ if (tmr_nacl && tmr_tpg) {
+ pr_debug("LUN_RESET: TMR caller fabric: %s"
+ " initiator port %s\n",
+@@ -383,7 +438,7 @@ int core_tmr_lun_reset(
+ dev->transport->name, tas);
+
+ core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
+- core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas,
++ core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas,
+ preempt_and_abort_list);
+
+ /*
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index adf96b78e9f0..b6051c82f386 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -515,9 +515,6 @@ void transport_deregister_session(struct se_session *se_sess)
+ }
+ EXPORT_SYMBOL(transport_deregister_session);
+
+-/*
+- * Called with cmd->t_state_lock held.
+- */
+ static void target_remove_from_state_list(struct se_cmd *cmd)
+ {
+ struct se_device *dev = cmd->se_dev;
+@@ -542,10 +539,6 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&cmd->t_state_lock, flags);
+- if (write_pending)
+- cmd->t_state = TRANSPORT_WRITE_PENDING;
+-
+ if (remove_from_lists) {
+ target_remove_from_state_list(cmd);
+
+@@ -555,6 +548,10 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
+ cmd->se_lun = NULL;
+ }
+
++ spin_lock_irqsave(&cmd->t_state_lock, flags);
++ if (write_pending)
++ cmd->t_state = TRANSPORT_WRITE_PENDING;
++
+ /*
+ * Determine if frontend context caller is requesting the stopping of
+ * this command for frontend exceptions.
+@@ -609,6 +606,8 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
+
+ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+ {
++ bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
++
+ if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
+ transport_lun_remove_cmd(cmd);
+ /*
+@@ -620,7 +619,7 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+
+ if (transport_cmd_check_stop_to_fabric(cmd))
+ return;
+- if (remove)
++ if (remove && ack_kref)
+ transport_put_cmd(cmd);
+ }
+
+@@ -688,7 +687,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
+ * Check for case where an explicit ABORT_TASK has been received
+ * and transport_wait_for_tasks() will be waiting for completion..
+ */
+- if (cmd->transport_state & CMD_T_ABORTED &&
++ if (cmd->transport_state & CMD_T_ABORTED ||
+ cmd->transport_state & CMD_T_STOP) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ complete_all(&cmd->t_transport_stop_comp);
+@@ -1596,7 +1595,7 @@ bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
+ void transport_generic_request_failure(struct se_cmd *cmd,
+ sense_reason_t sense_reason)
+ {
+- int ret = 0;
++ int ret = 0, post_ret = 0;
+
+ pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
+ " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
+@@ -1619,7 +1618,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
+ */
+ if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
+ cmd->transport_complete_callback)
+- cmd->transport_complete_callback(cmd, false);
++ cmd->transport_complete_callback(cmd, false, &post_ret);
+
+ switch (sense_reason) {
+ case TCM_NON_EXISTENT_LUN:
+@@ -1759,19 +1758,21 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
+ return true;
+ }
+
++static int __transport_check_aborted_status(struct se_cmd *, int);
++
+ void target_execute_cmd(struct se_cmd *cmd)
+ {
+ /*
+- * If the received CDB has aleady been aborted stop processing it here.
+- */
+- if (transport_check_aborted_status(cmd, 1))
+- return;
+-
+- /*
+ * Determine if frontend context caller is requesting the stopping of
+ * this command for frontend exceptions.
++ *
++ * If the received CDB has already been aborted, stop processing it here.
+ */
+ spin_lock_irq(&cmd->t_state_lock);
++ if (__transport_check_aborted_status(cmd, 1)) {
++ spin_unlock_irq(&cmd->t_state_lock);
++ return;
++ }
+ if (cmd->transport_state & CMD_T_STOP) {
+ pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
+ __func__, __LINE__,
+@@ -1974,11 +1975,13 @@ static void target_complete_ok_work(struct work_struct *work)
+ */
+ if (cmd->transport_complete_callback) {
+ sense_reason_t rc;
++ bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
++ bool zero_dl = !(cmd->data_length);
++ int post_ret = 0;
+
+- rc = cmd->transport_complete_callback(cmd, true);
+- if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
+- if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
+- !cmd->data_length)
++ rc = cmd->transport_complete_callback(cmd, true, &post_ret);
++ if (!rc && !post_ret) {
++ if (caw && zero_dl)
+ goto queue_rsp;
+
+ return;
+@@ -2128,20 +2131,14 @@ static inline void transport_free_pages(struct se_cmd *cmd)
+ }
+
+ /**
+- * transport_release_cmd - free a command
+- * @cmd: command to free
++ * transport_put_cmd - release a reference to a command
++ * @cmd: command to release
+ *
+- * This routine unconditionally frees a command, and reference counting
+- * or list removal must be done in the caller.
++ * This routine releases our reference to the command and frees it if possible.
+ */
+-static int transport_release_cmd(struct se_cmd *cmd)
++static int transport_put_cmd(struct se_cmd *cmd)
+ {
+ BUG_ON(!cmd->se_tfo);
+-
+- if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+- core_tmr_release_req(cmd->se_tmr_req);
+- if (cmd->t_task_cdb != cmd->__t_task_cdb)
+- kfree(cmd->t_task_cdb);
+ /*
+ * If this cmd has been setup with target_get_sess_cmd(), drop
+ * the kref and call ->release_cmd() in kref callback.
+@@ -2149,18 +2146,6 @@ static int transport_release_cmd(struct se_cmd *cmd)
+ return target_put_sess_cmd(cmd);
+ }
+
+-/**
+- * transport_put_cmd - release a reference to a command
+- * @cmd: command to release
+- *
+- * This routine releases our reference to the command and frees it if possible.
+- */
+-static int transport_put_cmd(struct se_cmd *cmd)
+-{
+- transport_free_pages(cmd);
+- return transport_release_cmd(cmd);
+-}
+-
+ void *transport_kmap_data_sg(struct se_cmd *cmd)
+ {
+ struct scatterlist *sg = cmd->t_data_sg;
+@@ -2356,34 +2341,59 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
+ }
+ }
+
+-int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
++static bool
++__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
++ unsigned long *flags);
++
++static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
+ {
+ unsigned long flags;
++
++ spin_lock_irqsave(&cmd->t_state_lock, flags);
++ __transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++}
++
++int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
++{
+ int ret = 0;
++ bool aborted = false, tas = false;
+
+ if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
+ if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
+- transport_wait_for_tasks(cmd);
++ target_wait_free_cmd(cmd, &aborted, &tas);
+
+- ret = transport_release_cmd(cmd);
++ if (!aborted || tas)
++ ret = transport_put_cmd(cmd);
+ } else {
+ if (wait_for_tasks)
+- transport_wait_for_tasks(cmd);
++ target_wait_free_cmd(cmd, &aborted, &tas);
+ /*
+ * Handle WRITE failure case where transport_generic_new_cmd()
+ * has already added se_cmd to state_list, but fabric has
+ * failed command before I/O submission.
+ */
+- if (cmd->state_active) {
+- spin_lock_irqsave(&cmd->t_state_lock, flags);
++ if (cmd->state_active)
+ target_remove_from_state_list(cmd);
+- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+- }
+
+ if (cmd->se_lun)
+ transport_lun_remove_cmd(cmd);
+
+- ret = transport_put_cmd(cmd);
++ if (!aborted || tas)
++ ret = transport_put_cmd(cmd);
++ }
++ /*
++ * If the task has been internally aborted due to TMR ABORT_TASK
++ * or LUN_RESET, target_core_tmr.c is responsible for performing
++ * the remaining calls to target_put_sess_cmd(), and not the
++ * callers of this function.
++ */
++ if (aborted) {
++ pr_debug("Detected CMD_T_ABORTED for ITT: %u\n",
++ cmd->se_tfo->get_task_tag(cmd));
++ wait_for_completion(&cmd->cmd_wait_comp);
++ cmd->se_tfo->release_cmd(cmd);
++ ret = 1;
+ }
+ return ret;
+ }
+@@ -2425,24 +2435,44 @@ out:
+ }
+ EXPORT_SYMBOL(target_get_sess_cmd);
+
++static void target_free_cmd_mem(struct se_cmd *cmd)
++{
++ transport_free_pages(cmd);
++
++ if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
++ core_tmr_release_req(cmd->se_tmr_req);
++ if (cmd->t_task_cdb != cmd->__t_task_cdb)
++ kfree(cmd->t_task_cdb);
++}
++
+ static void target_release_cmd_kref(struct kref *kref)
+ {
+ struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
+ struct se_session *se_sess = se_cmd->se_sess;
++ bool fabric_stop;
+
+ if (list_empty(&se_cmd->se_cmd_list)) {
+ spin_unlock(&se_sess->sess_cmd_lock);
++ target_free_cmd_mem(se_cmd);
+ se_cmd->se_tfo->release_cmd(se_cmd);
+ return;
+ }
+- if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
++
++ spin_lock(&se_cmd->t_state_lock);
++ fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP);
++ spin_unlock(&se_cmd->t_state_lock);
++
++ if (se_cmd->cmd_wait_set || fabric_stop) {
++ list_del_init(&se_cmd->se_cmd_list);
+ spin_unlock(&se_sess->sess_cmd_lock);
++ target_free_cmd_mem(se_cmd);
+ complete(&se_cmd->cmd_wait_comp);
+ return;
+ }
+- list_del(&se_cmd->se_cmd_list);
++ list_del_init(&se_cmd->se_cmd_list);
+ spin_unlock(&se_sess->sess_cmd_lock);
+
++ target_free_cmd_mem(se_cmd);
+ se_cmd->se_tfo->release_cmd(se_cmd);
+ }
+
+@@ -2454,6 +2484,7 @@ int target_put_sess_cmd(struct se_cmd *se_cmd)
+ struct se_session *se_sess = se_cmd->se_sess;
+
+ if (!se_sess) {
++ target_free_cmd_mem(se_cmd);
+ se_cmd->se_tfo->release_cmd(se_cmd);
+ return 1;
+ }
+@@ -2471,6 +2502,7 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
+ {
+ struct se_cmd *se_cmd;
+ unsigned long flags;
++ int rc;
+
+ spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+ if (se_sess->sess_tearing_down) {
+@@ -2480,8 +2512,15 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
+ se_sess->sess_tearing_down = 1;
+ list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
+
+- list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
+- se_cmd->cmd_wait_set = 1;
++ list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) {
++ rc = kref_get_unless_zero(&se_cmd->cmd_kref);
++ if (rc) {
++ se_cmd->cmd_wait_set = 1;
++ spin_lock(&se_cmd->t_state_lock);
++ se_cmd->transport_state |= CMD_T_FABRIC_STOP;
++ spin_unlock(&se_cmd->t_state_lock);
++ }
++ }
+
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ }
+@@ -2494,15 +2533,25 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
+ {
+ struct se_cmd *se_cmd, *tmp_cmd;
+ unsigned long flags;
++ bool tas;
+
+ list_for_each_entry_safe(se_cmd, tmp_cmd,
+ &se_sess->sess_wait_list, se_cmd_list) {
+- list_del(&se_cmd->se_cmd_list);
++ list_del_init(&se_cmd->se_cmd_list);
+
+ pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
+ " %d\n", se_cmd, se_cmd->t_state,
+ se_cmd->se_tfo->get_cmd_state(se_cmd));
+
++ spin_lock_irqsave(&se_cmd->t_state_lock, flags);
++ tas = (se_cmd->transport_state & CMD_T_TAS);
++ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
++
++ if (!target_put_sess_cmd(se_cmd)) {
++ if (tas)
++ target_put_sess_cmd(se_cmd);
++ }
++
+ wait_for_completion(&se_cmd->cmd_wait_comp);
+ pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
+ " fabric state: %d\n", se_cmd, se_cmd->t_state,
+@@ -2545,34 +2594,38 @@ int transport_clear_lun_ref(struct se_lun *lun)
+ return 0;
+ }
+
+-/**
+- * transport_wait_for_tasks - wait for completion to occur
+- * @cmd: command to wait
+- *
+- * Called from frontend fabric context to wait for storage engine
+- * to pause and/or release frontend generated struct se_cmd.
+- */
+-bool transport_wait_for_tasks(struct se_cmd *cmd)
++static bool
++__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
++ bool *aborted, bool *tas, unsigned long *flags)
++ __releases(&cmd->t_state_lock)
++ __acquires(&cmd->t_state_lock)
+ {
+- unsigned long flags;
+
+- spin_lock_irqsave(&cmd->t_state_lock, flags);
++ assert_spin_locked(&cmd->t_state_lock);
++ WARN_ON_ONCE(!irqs_disabled());
++
++ if (fabric_stop)
++ cmd->transport_state |= CMD_T_FABRIC_STOP;
++
++ if (cmd->transport_state & CMD_T_ABORTED)
++ *aborted = true;
++
++ if (cmd->transport_state & CMD_T_TAS)
++ *tas = true;
++
+ if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
+- !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
+- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++ !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
+ return false;
+- }
+
+ if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
+- !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
+- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++ !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
+ return false;
+- }
+
+- if (!(cmd->transport_state & CMD_T_ACTIVE)) {
+- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++ if (!(cmd->transport_state & CMD_T_ACTIVE))
++ return false;
++
++ if (fabric_stop && *aborted)
+ return false;
+- }
+
+ cmd->transport_state |= CMD_T_STOP;
+
+@@ -2581,20 +2634,37 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
+ cmd, cmd->se_tfo->get_task_tag(cmd),
+ cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
+
+- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++ spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
+
+ wait_for_completion(&cmd->t_transport_stop_comp);
+
+- spin_lock_irqsave(&cmd->t_state_lock, flags);
++ spin_lock_irqsave(&cmd->t_state_lock, *flags);
+ cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
+
+ pr_debug("wait_for_tasks: Stopped wait_for_completion("
+ "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
+ cmd->se_tfo->get_task_tag(cmd));
+
++ return true;
++}
++
++/**
++ * transport_wait_for_tasks - wait for completion to occur
++ * @cmd: command to wait
++ *
++ * Called from frontend fabric context to wait for storage engine
++ * to pause and/or release frontend generated struct se_cmd.
++ */
++bool transport_wait_for_tasks(struct se_cmd *cmd)
++{
++ unsigned long flags;
++ bool ret, aborted = false, tas = false;
++
++ spin_lock_irqsave(&cmd->t_state_lock, flags);
++ ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+- return true;
++ return ret;
+ }
+ EXPORT_SYMBOL(transport_wait_for_tasks);
+
+@@ -2880,8 +2950,13 @@ after_reason:
+ }
+ EXPORT_SYMBOL(transport_send_check_condition_and_sense);
+
+-int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
++static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
++ __releases(&cmd->t_state_lock)
++ __acquires(&cmd->t_state_lock)
+ {
++ assert_spin_locked(&cmd->t_state_lock);
++ WARN_ON_ONCE(!irqs_disabled());
++
+ if (!(cmd->transport_state & CMD_T_ABORTED))
+ return 0;
+
+@@ -2889,19 +2964,37 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
+ * If cmd has been aborted but either no status is to be sent or it has
+ * already been sent, just return
+ */
+- if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
++ if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) {
++ if (send_status)
++ cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
+ return 1;
++ }
+
+- pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n",
+- cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));
++ pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:"
++ " 0x%02x ITT: 0x%08x\n", cmd->t_task_cdb[0],
++ cmd->se_tfo->get_task_tag(cmd));
+
+ cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
+ cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+ trace_target_cmd_complete(cmd);
++
++ spin_unlock_irq(&cmd->t_state_lock);
+ cmd->se_tfo->queue_status(cmd);
++ spin_lock_irq(&cmd->t_state_lock);
+
+ return 1;
+ }
++
++int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
++{
++ int ret;
++
++ spin_lock_irq(&cmd->t_state_lock);
++ ret = __transport_check_aborted_status(cmd, send_status);
++ spin_unlock_irq(&cmd->t_state_lock);
++
++ return ret;
++}
+ EXPORT_SYMBOL(transport_check_aborted_status);
+
+ void transport_send_task_abort(struct se_cmd *cmd)
+@@ -2923,11 +3016,17 @@ void transport_send_task_abort(struct se_cmd *cmd)
+ */
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ if (cmd->se_tfo->write_pending_status(cmd) != 0) {
+- cmd->transport_state |= CMD_T_ABORTED;
++ spin_lock_irqsave(&cmd->t_state_lock, flags);
++ if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++ goto send_abort;
++ }
+ cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ return;
+ }
+ }
++send_abort:
+ cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+
+ transport_lun_remove_cmd(cmd);
+diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
+index a048b08b9d4d..92f169ca520e 100644
+--- a/drivers/usb/chipidea/otg.c
++++ b/drivers/usb/chipidea/otg.c
+@@ -116,7 +116,7 @@ static void ci_otg_work(struct work_struct *work)
+ int ci_hdrc_otg_init(struct ci_hdrc *ci)
+ {
+ INIT_WORK(&ci->work, ci_otg_work);
+- ci->wq = create_singlethread_workqueue("ci_otg");
++ ci->wq = create_freezable_workqueue("ci_otg");
+ if (!ci->wq) {
+ dev_err(ci->dev, "can't create workqueue\n");
+ return -ENODEV;
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index 66f62563bcf9..be6bda9019e7 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -769,7 +769,6 @@ struct dwc3 {
+ unsigned pullups_connected:1;
+ unsigned resize_fifos:1;
+ unsigned setup_packet_pending:1;
+- unsigned start_config_issued:1;
+ unsigned three_stage_setup:1;
+ };
+
+diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
+index 1c1525b0a1fb..b75d7c8ad2f2 100644
+--- a/drivers/usb/dwc3/ep0.c
++++ b/drivers/usb/dwc3/ep0.c
+@@ -544,7 +544,6 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
+ int ret;
+ u32 reg;
+
+- dwc->start_config_issued = false;
+ cfg = le16_to_cpu(ctrl->wValue);
+
+ switch (state) {
+@@ -727,10 +726,6 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
+ dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY\n");
+ ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
+ break;
+- case USB_REQ_SET_INTERFACE:
+- dev_vdbg(dwc->dev, "USB_REQ_SET_INTERFACE\n");
+- dwc->start_config_issued = false;
+- /* Fall through */
+ default:
+ dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver\n");
+ ret = dwc3_ep0_delegate_req(dwc, ctrl);
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index d98faf73d6b2..d4f7b607fc61 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -382,24 +382,66 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep)
+ dep->trb_pool_dma = 0;
+ }
+
++static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
++
++/**
++ * dwc3_gadget_start_config - Configure EP resources
++ * @dwc: pointer to our controller context structure
++ * @dep: endpoint that is being enabled
++ *
++ * The assignment of transfer resources cannot perfectly follow the
++ * data book due to the fact that the controller driver does not have
++ * all knowledge of the configuration in advance. It is given this
++ * information piecemeal by the composite gadget framework after every
++ * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
++ * programming model in this scenario can cause errors. For two
++ * reasons:
++ *
++ * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
++ * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
++ * multiple interfaces.
++ *
++ * 2) The databook does not mention doing more DEPXFERCFG for new
++ * endpoint on alt setting (8.1.6).
++ *
++ * The following simplified method is used instead:
++ *
++ * All hardware endpoints can be assigned a transfer resource and this
++ * setting will stay persistent until either a core reset or
++ * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
++ * do DEPXFERCFG for every hardware endpoint as well. We are
++ * guaranteed that there are as many transfer resources as endpoints.
++ *
++ * This function is called for each endpoint when it is being enabled
++ * but is triggered only when called for EP0-out, which always happens
++ * first, and which should only happen in one of the above conditions.
++ */
+ static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
+ {
+ struct dwc3_gadget_ep_cmd_params params;
+ u32 cmd;
++ int i;
++ int ret;
++
++ if (dep->number)
++ return 0;
+
+ memset(&params, 0x00, sizeof(params));
++ cmd = DWC3_DEPCMD_DEPSTARTCFG;
+
+- if (dep->number != 1) {
+- cmd = DWC3_DEPCMD_DEPSTARTCFG;
+- /* XferRscIdx == 0 for ep0 and 2 for the remaining */
+- if (dep->number > 1) {
+- if (dwc->start_config_issued)
+- return 0;
+- dwc->start_config_issued = true;
+- cmd |= DWC3_DEPCMD_PARAM(2);
+- }
++ ret = dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
++ if (ret)
++ return ret;
+
+- return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
++ for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
++ struct dwc3_ep *dep = dwc->eps[i];
++
++ if (!dep)
++ continue;
++
++ ret = dwc3_gadget_set_xfer_resource(dwc, dep);
++ if (ret)
++ return ret;
+ }
+
+ return 0;
+@@ -513,10 +555,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
+ struct dwc3_trb *trb_st_hw;
+ struct dwc3_trb *trb_link;
+
+- ret = dwc3_gadget_set_xfer_resource(dwc, dep);
+- if (ret)
+- return ret;
+-
+ dep->endpoint.desc = desc;
+ dep->comp_desc = comp_desc;
+ dep->type = usb_endpoint_type(desc);
+@@ -1582,8 +1620,6 @@ static int dwc3_gadget_start(struct usb_gadget *g,
+ }
+ dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+
+- dwc->start_config_issued = false;
+-
+ /* Start with SuperSpeed Default */
+ dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
+
+@@ -2146,7 +2182,6 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+ dwc3_disconnect_gadget(dwc);
+- dwc->start_config_issued = false;
+
+ dwc->gadget.speed = USB_SPEED_UNKNOWN;
+ dwc->setup_packet_pending = false;
+@@ -2200,7 +2235,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
+
+ dwc3_stop_active_transfers(dwc);
+ dwc3_clear_stall_all_ep(dwc);
+- dwc->start_config_issued = false;
+
+ /* Reset device address to zero */
+ reg = dwc3_readl(dwc->regs, DWC3_DCFG);
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 9364bed2a8b3..50426cf095a5 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -161,9 +161,14 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
+ { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
+ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
++ { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
++ { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
++ { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
++ { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
+ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+ { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
+ { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
++ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+ { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
+ { USB_DEVICE(0x1D6F, 0x0010) }, /* Seluxit ApS RF Dongle */
+ { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 95d3ab73da0e..5e194f187802 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -272,6 +272,7 @@ static void option_instat_callback(struct urb *urb);
+ #define TELIT_PRODUCT_UE910_V2 0x1012
+ #define TELIT_PRODUCT_LE922_USBCFG0 0x1042
+ #define TELIT_PRODUCT_LE922_USBCFG3 0x1043
++#define TELIT_PRODUCT_LE922_USBCFG5 0x1045
+ #define TELIT_PRODUCT_LE920 0x1200
+ #define TELIT_PRODUCT_LE910 0x1201
+
+@@ -317,6 +318,7 @@ static void option_instat_callback(struct urb *urb);
+ #define TOSHIBA_PRODUCT_G450 0x0d45
+
+ #define ALINK_VENDOR_ID 0x1e0e
++#define SIMCOM_PRODUCT_SIM7100E 0x9001 /* Yes, ALINK_VENDOR_ID */
+ #define ALINK_PRODUCT_PH300 0x9100
+ #define ALINK_PRODUCT_3GU 0x9200
+
+@@ -611,6 +613,10 @@ static const struct option_blacklist_info zte_1255_blacklist = {
+ .reserved = BIT(3) | BIT(4),
+ };
+
++static const struct option_blacklist_info simcom_sim7100e_blacklist = {
++ .reserved = BIT(5) | BIT(6),
++};
++
+ static const struct option_blacklist_info telit_le910_blacklist = {
+ .sendsetup = BIT(0),
+ .reserved = BIT(1) | BIT(2),
+@@ -1130,6 +1136,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
+ { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
++ { USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+@@ -1137,6 +1145,8 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */
+ { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x9041, 0xff),
+ .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC7305/MC7355 */
++ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
+@@ -1188,6 +1198,8 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
+ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
++ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
+ .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
+@@ -1657,6 +1669,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
+ { USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
++ { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
++ .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist },
+ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
+ .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
+ },
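For context on the new modem entries above: in the option driver, a blacklist's reserved mask names the interface numbers the serial driver must refuse at probe time, typically because they carry QMI or other network functions meant for a different driver, which is why simcom_sim7100e_blacklist added above keeps interfaces 5 and 6 unbound. A minimal sketch of how such a mask is consumed, with hypothetical names:

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/types.h>

/*
 * Sketch of the check a probe routine applies: if the interface number is
 * set in the device's reserved mask, refuse to bind so another driver can
 * pick that interface up.  Names are hypothetical.
 */
static int demo_should_bind(u8 ifnum, unsigned long reserved_mask)
{
	if (ifnum < BITS_PER_LONG && test_bit(ifnum, &reserved_mask))
		return -ENODEV;
	return 0;
}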
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index d156545728c2..9dfbea6c1d2c 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -152,12 +152,17 @@ static const struct usb_device_id id_table[] = {
+ {DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */
+ {DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */
+ {DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */
++ {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx */
++ {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */
++ {DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */
++ {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */
+ {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
+ {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
+ {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
+ {DEVICE_SWI(0x413c, 0x81a8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
+ {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
+ {DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
++ {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
+
+ /* Huawei devices */
+ {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
+index 2f8a0552d020..bc7bc4e26c89 100644
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -375,7 +375,8 @@ static long vfio_pci_ioctl(void *device_data,
+ info.num_regions = VFIO_PCI_NUM_REGIONS;
+ info.num_irqs = VFIO_PCI_NUM_IRQS;
+
+- return copy_to_user((void __user *)arg, &info, minsz);
++ return copy_to_user((void __user *)arg, &info, minsz) ?
++ -EFAULT : 0;
+
+ } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
+ struct pci_dev *pdev = vdev->pdev;
+@@ -448,7 +449,8 @@ static long vfio_pci_ioctl(void *device_data,
+ return -EINVAL;
+ }
+
+- return copy_to_user((void __user *)arg, &info, minsz);
++ return copy_to_user((void __user *)arg, &info, minsz) ?
++ -EFAULT : 0;
+
+ } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
+ struct vfio_irq_info info;
+@@ -482,7 +484,8 @@ static long vfio_pci_ioctl(void *device_data,
+ else
+ info.flags |= VFIO_IRQ_INFO_NORESIZE;
+
+- return copy_to_user((void __user *)arg, &info, minsz);
++ return copy_to_user((void __user *)arg, &info, minsz) ?
++ -EFAULT : 0;
+
+ } else if (cmd == VFIO_DEVICE_SET_IRQS) {
+ struct vfio_irq_set hdr;
+diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
+index 4a9d666f1e91..3d1cbbd730bc 100644
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -928,7 +928,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
+
+ info.iova_pgsizes = vfio_pgsize_bitmap(iommu);
+
+- return copy_to_user((void __user *)arg, &info, minsz);
++ return copy_to_user((void __user *)arg, &info, minsz) ?
++ -EFAULT : 0;
+
+ } else if (cmd == VFIO_IOMMU_MAP_DMA) {
+ struct vfio_iommu_type1_dma_map map;
+@@ -961,7 +962,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
+ if (ret)
+ return ret;
+
+- return copy_to_user((void __user *)arg, &unmap, minsz);
++ return copy_to_user((void __user *)arg, &unmap, minsz) ?
++ -EFAULT : 0;
+ }
+
+ return -ENOTTY;
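The VFIO hunks above all correct the same mistake: copy_to_user() returns the number of bytes it failed to copy, not a negative errno, so returning that count directly from an ioctl handler would hand user space a positive, success-looking value on a partial copy. A minimal sketch of the corrected idiom, with a hypothetical helper name:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/*
 * Hypothetical helper showing the shape of the fix: a non-zero result from
 * copy_to_user() (bytes NOT copied) is mapped to -EFAULT, and 0 is returned
 * only when the whole structure reached user space.
 */
static long report_info_to_user(void __user *arg, const void *info, size_t minsz)
{
	return copy_to_user(arg, info, minsz) ? -EFAULT : 0;
}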
+diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
+index f72af87640e0..560b3ecbcba8 100644
+--- a/drivers/xen/xen-pciback/pciback.h
++++ b/drivers/xen/xen-pciback/pciback.h
+@@ -37,6 +37,7 @@ struct xen_pcibk_device {
+ struct xen_pci_sharedinfo *sh_info;
+ unsigned long flags;
+ struct work_struct op_work;
++ struct xen_pci_op op;
+ };
+
+ struct xen_pcibk_dev_data {
+diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
+index c4a0666de6f5..7eb8f8895a24 100644
+--- a/drivers/xen/xen-pciback/pciback_ops.c
++++ b/drivers/xen/xen-pciback/pciback_ops.c
+@@ -197,13 +197,26 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
+ struct xen_pcibk_dev_data *dev_data;
+ int i, result;
+ struct msix_entry *entries;
++ u16 cmd;
+
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n",
+ pci_name(dev));
++
+ if (op->value > SH_INFO_MAX_VEC)
+ return -EINVAL;
+
++ if (dev->msix_enabled)
++ return -EALREADY;
++
++ /*
++ * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able
++ * to access the BARs where the MSI-X entries reside.
++ */
++ pci_read_config_word(dev, PCI_COMMAND, &cmd);
++ if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
++ return -ENXIO;
++
+ entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL);
+ if (entries == NULL)
+ return -ENOMEM;
+@@ -298,9 +311,11 @@ void xen_pcibk_do_op(struct work_struct *data)
+ container_of(data, struct xen_pcibk_device, op_work);
+ struct pci_dev *dev;
+ struct xen_pcibk_dev_data *dev_data = NULL;
+- struct xen_pci_op *op = &pdev->sh_info->op;
++ struct xen_pci_op *op = &pdev->op;
+ int test_intx = 0;
+
++ *op = pdev->sh_info->op;
++ barrier();
+ dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn);
+
+ if (dev == NULL)
+@@ -342,6 +357,17 @@ void xen_pcibk_do_op(struct work_struct *data)
+ if ((dev_data->enable_intx != test_intx))
+ xen_pcibk_control_isr(dev, 0 /* no reset */);
+ }
++ pdev->sh_info->op.err = op->err;
++ pdev->sh_info->op.value = op->value;
++#ifdef CONFIG_PCI_MSI
++ if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
++ unsigned int i;
++
++ for (i = 0; i < op->value; i++)
++ pdev->sh_info->op.msix_entries[i].vector =
++ op->msix_entries[i].vector;
++ }
++#endif
+ /* Tell the driver domain that we're done. */
+ wmb();
+ clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
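The op handling change above is an instance of the usual double-fetch defence for memory shared with an untrusted peer: the request is snapshotted once into backend-private storage, every check and use operates on that snapshot, and only the reply fields are written back to the shared page. A rough sketch of that shape, with hypothetical structure and field names:

#include <linux/compiler.h>
#include <linux/types.h>

struct demo_op {
	u32 cmd;
	u32 value;
	u32 err;
};

/*
 * 'shared' lives in a page the frontend can rewrite at any moment; 'priv'
 * is backend-private.  Fetch the request exactly once, work only on the
 * private copy, and copy back nothing but the reply fields.
 */
static void demo_handle_op(struct demo_op *shared, struct demo_op *priv)
{
	*priv = *shared;		/* single snapshot of the untrusted request */
	barrier();			/* stop the compiler re-reading *shared */

	/* ... validate and act on priv->cmd / priv->value only ... */
	priv->err = 0;

	shared->err = priv->err;	/* publish results, nothing else */
	shared->value = priv->value;
}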
+diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
+index d30c6b246342..7dec1407f1c7 100644
+--- a/drivers/xen/xen-scsiback.c
++++ b/drivers/xen/xen-scsiback.c
+@@ -947,12 +947,12 @@ out:
+ spin_unlock_irqrestore(&info->v2p_lock, flags);
+
+ out_free:
+- mutex_lock(&tpg->tv_tpg_mutex);
+- tpg->tv_tpg_fe_count--;
+- mutex_unlock(&tpg->tv_tpg_mutex);
+-
+- if (err)
++ if (err) {
++ mutex_lock(&tpg->tv_tpg_mutex);
++ tpg->tv_tpg_fe_count--;
++ mutex_unlock(&tpg->tv_tpg_mutex);
+ kfree(new);
++ }
+
+ return err;
+ }
+diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
+index 002e0c173939..15da9e346f60 100644
+--- a/fs/cifs/cifsfs.h
++++ b/fs/cifs/cifsfs.h
+@@ -31,19 +31,15 @@
+ * so that it will fit. We use hash_64 to convert the value to 31 bits, and
+ * then add 1, to ensure that we don't end up with a 0 as the value.
+ */
+-#if BITS_PER_LONG == 64
+ static inline ino_t
+ cifs_uniqueid_to_ino_t(u64 fileid)
+ {
++ if ((sizeof(ino_t)) < (sizeof(u64)))
++ return (ino_t)hash_64(fileid, (sizeof(ino_t) * 8) - 1) + 1;
++
+ return (ino_t)fileid;
++
+ }
+-#else
+-static inline ino_t
+-cifs_uniqueid_to_ino_t(u64 fileid)
+-{
+- return (ino_t)hash_64(fileid, (sizeof(ino_t) * 8) - 1) + 1;
+-}
+-#endif
+
+ extern struct file_system_type cifs_fs_type;
+ extern const struct address_space_operations cifs_addr_ops;
+diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
+index 4ae04561be88..9924c8cbe70f 100644
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -1394,11 +1394,10 @@ openRetry:
+ * current bigbuf.
+ */
+ static int
+-cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
++discard_remaining_data(struct TCP_Server_Info *server)
+ {
+ unsigned int rfclen = get_rfc1002_length(server->smallbuf);
+ int remaining = rfclen + 4 - server->total_read;
+- struct cifs_readdata *rdata = mid->callback_data;
+
+ while (remaining > 0) {
+ int length;
+@@ -1412,10 +1411,20 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+ remaining -= length;
+ }
+
+- dequeue_mid(mid, rdata->result);
+ return 0;
+ }
+
++static int
++cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
++{
++ int length;
++ struct cifs_readdata *rdata = mid->callback_data;
++
++ length = discard_remaining_data(server);
++ dequeue_mid(mid, rdata->result);
++ return length;
++}
++
+ int
+ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+ {
+@@ -1444,6 +1453,12 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+ return length;
+ server->total_read += length;
+
++ if (server->ops->is_status_pending &&
++ server->ops->is_status_pending(buf, server, 0)) {
++ discard_remaining_data(server);
++ return -1;
++ }
++
+ /* Was the SMB read successful? */
+ rdata->result = server->ops->map_error(buf, false);
+ if (rdata->result != 0) {
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 8f1672bb82d5..3398ac7e7b37 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -997,21 +997,25 @@ parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp,
+ {
+ char *data_offset;
+ struct create_context *cc;
+- unsigned int next = 0;
++ unsigned int next;
++ unsigned int remaining;
+ char *name;
+
+ data_offset = (char *)rsp + 4 + le32_to_cpu(rsp->CreateContextsOffset);
++ remaining = le32_to_cpu(rsp->CreateContextsLength);
+ cc = (struct create_context *)data_offset;
+- do {
+- cc = (struct create_context *)((char *)cc + next);
++ while (remaining >= sizeof(struct create_context)) {
+ name = le16_to_cpu(cc->NameOffset) + (char *)cc;
+- if (le16_to_cpu(cc->NameLength) != 4 ||
+- strncmp(name, "RqLs", 4)) {
+- next = le32_to_cpu(cc->Next);
+- continue;
+- }
+- return server->ops->parse_lease_buf(cc, epoch);
+- } while (next != 0);
++ if (le16_to_cpu(cc->NameLength) == 4 &&
++ strncmp(name, "RqLs", 4) == 0)
++ return server->ops->parse_lease_buf(cc, epoch);
++
++ next = le32_to_cpu(cc->Next);
++ if (!next)
++ break;
++ remaining -= next;
++ cc = (struct create_context *)((char *)cc + next);
++ }
+
+ return 0;
+ }
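The parse_lease_state() rewrite above replaces an unbounded do/while over the server-supplied create contexts with a loop limited by CreateContextsLength, so a malformed Next offset can no longer walk past the reply buffer. A simplified sketch of that defensive shape, using a hypothetical record layout rather than the real create_context:

#include <asm/byteorder.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_ctx {
	__le32 next;		/* offset to the next record, 0 = last */
	__le16 name_off;	/* offset of the name from this record */
	__le16 name_len;
};

/*
 * Walk a chain of variable-length records inside a reply of 'remaining'
 * bytes.  The loop stops when there is no room left for another header,
 * when a step would run past the remaining bytes, or when next == 0 marks
 * the end of the chain.
 */
static struct demo_ctx *demo_find_ctx(struct demo_ctx *cc, unsigned int remaining,
				      const char *want, unsigned int want_len)
{
	unsigned int next;

	while (remaining >= sizeof(*cc)) {
		const char *name = (const char *)cc + le16_to_cpu(cc->name_off);

		if (le16_to_cpu(cc->name_len) == want_len &&
		    strncmp(name, want, want_len) == 0)
			return cc;

		next = le32_to_cpu(cc->next);
		if (!next || next > remaining)
			break;
		remaining -= next;
		cc = (struct demo_ctx *)((char *)cc + next);
	}
	return NULL;
}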
+diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
+index 0642cafaab34..12da295759f7 100644
+--- a/fs/hpfs/namei.c
++++ b/fs/hpfs/namei.c
+@@ -377,12 +377,11 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry)
+ struct inode *inode = dentry->d_inode;
+ dnode_secno dno;
+ int r;
+- int rep = 0;
+ int err;
+
+ hpfs_lock(dir->i_sb);
+ hpfs_adjust_length(name, &len);
+-again:
++
+ err = -ENOENT;
+ de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh);
+ if (!de)
+@@ -402,33 +401,9 @@ again:
+ hpfs_error(dir->i_sb, "there was error when removing dirent");
+ err = -EFSERROR;
+ break;
+- case 2: /* no space for deleting, try to truncate file */
+-
++ case 2: /* no space for deleting */
+ err = -ENOSPC;
+- if (rep++)
+- break;
+-
+- dentry_unhash(dentry);
+- if (!d_unhashed(dentry)) {
+- hpfs_unlock(dir->i_sb);
+- return -ENOSPC;
+- }
+- if (generic_permission(inode, MAY_WRITE) ||
+- !S_ISREG(inode->i_mode) ||
+- get_write_access(inode)) {
+- d_rehash(dentry);
+- } else {
+- struct iattr newattrs;
+- /*pr_info("truncating file before delete.\n");*/
+- newattrs.ia_size = 0;
+- newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
+- err = notify_change(dentry, &newattrs, NULL);
+- put_write_access(inode);
+- if (!err)
+- goto again;
+- }
+- hpfs_unlock(dir->i_sb);
+- return -ENOSPC;
++ break;
+ default:
+ drop_nlink(inode);
+ err = 0;
+diff --git a/fs/jffs2/README.Locking b/fs/jffs2/README.Locking
+index 3ea36554107f..8918ac905a3b 100644
+--- a/fs/jffs2/README.Locking
++++ b/fs/jffs2/README.Locking
+@@ -2,10 +2,6 @@
+ JFFS2 LOCKING DOCUMENTATION
+ ---------------------------
+
+-At least theoretically, JFFS2 does not require the Big Kernel Lock
+-(BKL), which was always helpfully obtained for it by Linux 2.4 VFS
+-code. It has its own locking, as described below.
+-
+ This document attempts to describe the existing locking rules for
+ JFFS2. It is not expected to remain perfectly up to date, but ought to
+ be fairly close.
+@@ -69,6 +65,7 @@ Ordering constraints:
+ any f->sem held.
+ 2. Never attempt to lock two file mutexes in one thread.
+ No ordering rules have been made for doing so.
++ 3. Never lock a page cache page with f->sem held.
+
+
+ erase_completion_lock spinlock
+diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c
+index a3750f902adc..c1f04947d7dc 100644
+--- a/fs/jffs2/build.c
++++ b/fs/jffs2/build.c
+@@ -49,7 +49,8 @@ next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c)
+
+
+ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
+- struct jffs2_inode_cache *ic)
++ struct jffs2_inode_cache *ic,
++ int *dir_hardlinks)
+ {
+ struct jffs2_full_dirent *fd;
+
+@@ -68,19 +69,21 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
+ dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
+ fd->name, fd->ino, ic->ino);
+ jffs2_mark_node_obsolete(c, fd->raw);
++ /* Clear the ic/raw union so it doesn't cause problems later. */
++ fd->ic = NULL;
+ continue;
+ }
+
++ /* From this point, fd->raw is no longer used so we can set fd->ic */
++ fd->ic = child_ic;
++ child_ic->pino_nlink++;
++ /* If we appear (at this stage) to have hard-linked directories,
++ * set a flag to trigger a scan later */
+ if (fd->type == DT_DIR) {
+- if (child_ic->pino_nlink) {
+- JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n",
+- fd->name, fd->ino, ic->ino);
+- /* TODO: What do we do about it? */
+- } else {
+- child_ic->pino_nlink = ic->ino;
+- }
+- } else
+- child_ic->pino_nlink++;
++ child_ic->flags |= INO_FLAGS_IS_DIR;
++ if (child_ic->pino_nlink > 1)
++ *dir_hardlinks = 1;
++ }
+
+ dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino);
+ /* Can't free scan_dents so far. We might need them in pass 2 */
+@@ -94,8 +97,7 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
+ */
+ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
+ {
+- int ret;
+- int i;
++ int ret, i, dir_hardlinks = 0;
+ struct jffs2_inode_cache *ic;
+ struct jffs2_full_dirent *fd;
+ struct jffs2_full_dirent *dead_fds = NULL;
+@@ -119,7 +121,7 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
+ /* Now scan the directory tree, increasing nlink according to every dirent found. */
+ for_each_inode(i, c, ic) {
+ if (ic->scan_dents) {
+- jffs2_build_inode_pass1(c, ic);
++ jffs2_build_inode_pass1(c, ic, &dir_hardlinks);
+ cond_resched();
+ }
+ }
+@@ -155,6 +157,20 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
+ }
+
+ dbg_fsbuild("pass 2a complete\n");
++
++ if (dir_hardlinks) {
++ /* If we detected directory hardlinks earlier, *hopefully*
++ * they are gone now because some of the links were from
++ * dead directories which still had some old dirents lying
++ * around and not yet garbage-collected, but which have
++ * been discarded above. So clear the pino_nlink field
++ * in each directory, so that the final scan below can
++ * print appropriate warnings. */
++ for_each_inode(i, c, ic) {
++ if (ic->flags & INO_FLAGS_IS_DIR)
++ ic->pino_nlink = 0;
++ }
++ }
+ dbg_fsbuild("freeing temporary data structures\n");
+
+ /* Finally, we can scan again and free the dirent structs */
+@@ -162,6 +178,33 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
+ while(ic->scan_dents) {
+ fd = ic->scan_dents;
+ ic->scan_dents = fd->next;
++ /* We do use the pino_nlink field to count nlink of
++ * directories during fs build, so set it to the
++ * parent ino# now, now that there is hopefully only
++ * one parent. */
++ if (fd->type == DT_DIR) {
++ if (!fd->ic) {
++ /* We'll have complained about it and marked the corresponding
++ raw node obsolete already. Just skip it. */
++ continue;
++ }
++
++ /* We *have* to have set this in jffs2_build_inode_pass1() */
++ BUG_ON(!(fd->ic->flags & INO_FLAGS_IS_DIR));
++
++ /* We clear ic->pino_nlink ∀ directories' ic *only* if dir_hardlinks
++ * is set. Otherwise, we know this should never trigger anyway, so
++ * we don't do the check. And ic->pino_nlink still contains the nlink
++ * value (which is 1). */
++ if (dir_hardlinks && fd->ic->pino_nlink) {
++ JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u is also hard linked from dir ino #%u\n",
++ fd->name, fd->ino, ic->ino, fd->ic->pino_nlink);
++ /* Should we unlink it from its previous parent? */
++ }
++
++ /* For directories, ic->pino_nlink holds that parent inode # */
++ fd->ic->pino_nlink = ic->ino;
++ }
+ jffs2_free_full_dirent(fd);
+ }
+ ic->scan_dents = NULL;
+@@ -240,11 +283,7 @@ static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c,
+
+ /* Reduce nlink of the child. If it's now zero, stick it on the
+ dead_fds list to be cleaned up later. Else just free the fd */
+-
+- if (fd->type == DT_DIR)
+- child_ic->pino_nlink = 0;
+- else
+- child_ic->pino_nlink--;
++ child_ic->pino_nlink--;
+
+ if (!child_ic->pino_nlink) {
+ dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n",
+diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
+index 64989ca9ba90..129eccb4c5a8 100644
+--- a/fs/jffs2/file.c
++++ b/fs/jffs2/file.c
+@@ -139,39 +139,33 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
+ struct page *pg;
+ struct inode *inode = mapping->host;
+ struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+- struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+- struct jffs2_raw_inode ri;
+- uint32_t alloc_len = 0;
+ pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ uint32_t pageofs = index << PAGE_CACHE_SHIFT;
+ int ret = 0;
+
+- jffs2_dbg(1, "%s()\n", __func__);
+-
+- if (pageofs > inode->i_size) {
+- ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
+- ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
+- if (ret)
+- return ret;
+- }
+-
+- mutex_lock(&f->sem);
+ pg = grab_cache_page_write_begin(mapping, index, flags);
+- if (!pg) {
+- if (alloc_len)
+- jffs2_complete_reservation(c);
+- mutex_unlock(&f->sem);
++ if (!pg)
+ return -ENOMEM;
+- }
+ *pagep = pg;
+
+- if (alloc_len) {
++ jffs2_dbg(1, "%s()\n", __func__);
++
++ if (pageofs > inode->i_size) {
+ /* Make new hole frag from old EOF to new page */
++ struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
++ struct jffs2_raw_inode ri;
+ struct jffs2_full_dnode *fn;
++ uint32_t alloc_len;
+
+ jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
+ (unsigned int)inode->i_size, pageofs);
+
++ ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
++ ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
++ if (ret)
++ goto out_page;
++
++ mutex_lock(&f->sem);
+ memset(&ri, 0, sizeof(ri));
+
+ ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+@@ -198,6 +192,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
+ if (IS_ERR(fn)) {
+ ret = PTR_ERR(fn);
+ jffs2_complete_reservation(c);
++ mutex_unlock(&f->sem);
+ goto out_page;
+ }
+ ret = jffs2_add_full_dnode_to_inode(c, f, fn);
+@@ -212,10 +207,12 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
+ jffs2_mark_node_obsolete(c, fn->raw);
+ jffs2_free_full_dnode(fn);
+ jffs2_complete_reservation(c);
++ mutex_unlock(&f->sem);
+ goto out_page;
+ }
+ jffs2_complete_reservation(c);
+ inode->i_size = pageofs;
++ mutex_unlock(&f->sem);
+ }
+
+ /*
+@@ -224,18 +221,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
+ * case of a short-copy.
+ */
+ if (!PageUptodate(pg)) {
++ mutex_lock(&f->sem);
+ ret = jffs2_do_readpage_nolock(inode, pg);
++ mutex_unlock(&f->sem);
+ if (ret)
+ goto out_page;
+ }
+- mutex_unlock(&f->sem);
+ jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags);
+ return ret;
+
+ out_page:
+ unlock_page(pg);
+ page_cache_release(pg);
+- mutex_unlock(&f->sem);
+ return ret;
+ }
+
+diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
+index 5a2dec2b064c..95d5880a63ee 100644
+--- a/fs/jffs2/gc.c
++++ b/fs/jffs2/gc.c
+@@ -1296,14 +1296,17 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
+ BUG_ON(start > orig_start);
+ }
+
+- /* First, use readpage() to read the appropriate page into the page cache */
+- /* Q: What happens if we actually try to GC the _same_ page for which commit_write()
+- * triggered garbage collection in the first place?
+- * A: I _think_ it's OK. read_cache_page shouldn't deadlock, we'll write out the
+- * page OK. We'll actually write it out again in commit_write, which is a little
+- * suboptimal, but at least we're correct.
+- */
++ /* The rules state that we must obtain the page lock *before* f->sem, so
++ * drop f->sem temporarily. Since we also hold c->alloc_sem, nothing's
++ * actually going to *change* so we're safe; we only allow reading.
++ *
++ * It is important to note that jffs2_write_begin() will ensure that its
++ * page is marked Uptodate before allocating space. That means that if we
++ * end up here trying to GC the *same* page that jffs2_write_begin() is
++ * trying to write out, read_cache_page() will not deadlock. */
++ mutex_unlock(&f->sem);
+ pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg);
++ mutex_lock(&f->sem);
+
+ if (IS_ERR(pg_ptr)) {
+ pr_warn("read_cache_page() returned error: %ld\n",
+diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h
+index fa35ff79ab35..0637271f3770 100644
+--- a/fs/jffs2/nodelist.h
++++ b/fs/jffs2/nodelist.h
+@@ -194,6 +194,7 @@ struct jffs2_inode_cache {
+ #define INO_STATE_CLEARING 6 /* In clear_inode() */
+
+ #define INO_FLAGS_XATTR_CHECKED 0x01 /* has no duplicate xattr_ref */
++#define INO_FLAGS_IS_DIR 0x02 /* is a directory */
+
+ #define RAWNODE_CLASS_INODE_CACHE 0
+ #define RAWNODE_CLASS_XATTR_DATUM 1
+@@ -249,7 +250,10 @@ struct jffs2_readinode_info
+
+ struct jffs2_full_dirent
+ {
+- struct jffs2_raw_node_ref *raw;
++ union {
++ struct jffs2_raw_node_ref *raw;
++ struct jffs2_inode_cache *ic; /* Just during part of build */
++ };
+ struct jffs2_full_dirent *next;
+ uint32_t version;
+ uint32_t ino; /* == zero for unlink */
+diff --git a/fs/namei.c b/fs/namei.c
+index be3d538d56b3..2a8ba683a888 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -3111,6 +3111,10 @@ opened:
+ goto exit_fput;
+ }
+ out:
++ if (unlikely(error > 0)) {
++ WARN_ON(1);
++ error = -EINVAL;
++ }
+ if (got_write)
+ mnt_drop_write(nd->path.mnt);
+ path_put(&save_parent);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 4561b2bd1de1..575fb0ebf980 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -2258,9 +2258,9 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
+ dentry = d_add_unique(dentry, igrab(state->inode));
+ if (dentry == NULL) {
+ dentry = opendata->dentry;
+- } else if (dentry != ctx->dentry) {
++ } else {
+ dput(ctx->dentry);
+- ctx->dentry = dget(dentry);
++ ctx->dentry = dentry;
+ }
+ nfs_set_verifier(dentry,
+ nfs_save_change_attribute(opendata->dir->d_inode));
+diff --git a/include/linux/ata.h b/include/linux/ata.h
+index f2f4d8da97c0..f7ff6554a354 100644
+--- a/include/linux/ata.h
++++ b/include/linux/ata.h
+@@ -484,8 +484,8 @@ enum ata_tf_protocols {
+ };
+
+ enum ata_ioctls {
+- ATA_IOC_GET_IO32 = 0x309,
+- ATA_IOC_SET_IO32 = 0x324,
++ ATA_IOC_GET_IO32 = 0x309, /* HDIO_GET_32BIT */
++ ATA_IOC_SET_IO32 = 0x324, /* HDIO_SET_32BIT */
+ };
+
+ /* core structures */
+diff --git a/include/linux/bio.h b/include/linux/bio.h
+index 7347f486ceca..08ce93ede229 100644
+--- a/include/linux/bio.h
++++ b/include/linux/bio.h
+@@ -292,6 +292,43 @@ static inline unsigned bio_segments(struct bio *bio)
+ */
+ #define bio_get(bio) atomic_inc(&(bio)->bi_cnt)
+
++static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
++{
++ *bv = bio_iovec(bio);
++}
++
++static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
++{
++ struct bvec_iter iter = bio->bi_iter;
++ int idx;
++
++ if (!bio_flagged(bio, BIO_CLONED)) {
++ *bv = bio->bi_io_vec[bio->bi_vcnt - 1];
++ return;
++ }
++
++ if (unlikely(!bio_multiple_segments(bio))) {
++ *bv = bio_iovec(bio);
++ return;
++ }
++
++ bio_advance_iter(bio, &iter, iter.bi_size);
++
++ if (!iter.bi_bvec_done)
++ idx = iter.bi_idx - 1;
++ else /* in the middle of bvec */
++ idx = iter.bi_idx;
++
++ *bv = bio->bi_io_vec[idx];
++
++ /*
++ * iter.bi_bvec_done records the actual length of the last bvec
++ * if this bio ends in the middle of an io vector
++ */
++ if (iter.bi_bvec_done)
++ bv->bv_len = iter.bi_bvec_done;
++}
++
+ enum bip_flags {
+ BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
+ BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index a60e04bcda0f..d69ce28efab1 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -715,7 +715,7 @@ struct ata_device {
+ union {
+ u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
+ u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */
+- };
++ } ____cacheline_aligned;
+
+ /* DEVSLP Timing Variables from Identify Device Data Log */
+ u8 devslp_timing[ATA_LOG_DEVSLP_SIZE];
+diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
+index c72d1ad41ad4..2446e1e21bc7 100644
+--- a/include/linux/nfs_fs.h
++++ b/include/linux/nfs_fs.h
+@@ -538,9 +538,7 @@ extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
+
+ static inline loff_t nfs_size_to_loff_t(__u64 size)
+ {
+- if (size > (__u64) OFFSET_MAX - 1)
+- return OFFSET_MAX - 1;
+- return (loff_t) size;
++ return min_t(u64, size, OFFSET_MAX);
+ }
+
+ static inline ino_t
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index c4b85a5889ba..ad9ce86428a6 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -520,7 +520,7 @@ struct se_cmd {
+ sense_reason_t (*execute_cmd)(struct se_cmd *);
+ sense_reason_t (*execute_rw)(struct se_cmd *, struct scatterlist *,
+ u32, enum dma_data_direction);
+- sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool);
++ sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *);
+
+ unsigned char *t_task_cdb;
+ unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
+@@ -535,6 +535,8 @@ struct se_cmd {
+ #define CMD_T_DEV_ACTIVE (1 << 7)
+ #define CMD_T_REQUEST_STOP (1 << 8)
+ #define CMD_T_BUSY (1 << 9)
++#define CMD_T_TAS (1 << 10)
++#define CMD_T_FABRIC_STOP (1 << 11)
+ spinlock_t t_state_lock;
+ struct completion t_transport_stop_comp;
+
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 1b0df1e504f0..b6cd1b653e21 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -644,7 +644,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
+ * The ftrace subsystem is for showing formats only.
+ * They can not be enabled or disabled via the event files.
+ */
+- if (call->class && call->class->reg)
++ if (call->class && call->class->reg &&
++ !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
+ return file;
+ }
+
+diff --git a/mm/memory.c b/mm/memory.c
+index 90fb265b32b6..e8e3cf7bd247 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3316,8 +3316,18 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ if (unlikely(pmd_none(*pmd)) &&
+ unlikely(__pte_alloc(mm, vma, pmd, address)))
+ return VM_FAULT_OOM;
+- /* if an huge pmd materialized from under us just retry later */
+- if (unlikely(pmd_trans_huge(*pmd)))
++ /*
++ * If a huge pmd materialized under us just retry later. Use
++ * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd
++ * didn't become pmd_trans_huge under us and then back to pmd_none, as
++ * a result of MADV_DONTNEED running immediately after a huge pmd fault
++ * in a different thread of this mm, in turn leading to a misleading
++ * pmd_trans_huge() retval. All we have to ensure is that it is a
++ * regular pmd that we can walk with pte_offset_map() and we can do that
++ * through an atomic read in C, which is what pmd_trans_unstable()
++ * provides.
++ */
++ if (unlikely(pmd_trans_unstable(pmd)))
+ return 0;
+ /*
+ * A regular pmd is established and it can't morph into a huge pmd
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index 56a65536c8f1..1519051603ff 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -2287,7 +2287,7 @@ static int read_partial_message(struct ceph_connection *con)
+ con->in_base_pos = -front_len - middle_len - data_len -
+ sizeof(m->footer);
+ con->in_tag = CEPH_MSGR_TAG_READY;
+- return 0;
++ return 1;
+ } else if ((s64)seq - (s64)con->in_seq > 1) {
+ pr_err("read_partial_message bad seq %lld expected %lld\n",
+ seq, con->in_seq + 1);
+@@ -2320,7 +2320,7 @@ static int read_partial_message(struct ceph_connection *con)
+ sizeof(m->footer);
+ con->in_tag = CEPH_MSGR_TAG_READY;
+ con->in_seq++;
+- return 0;
++ return 1;
+ }
+
+ BUG_ON(!con->in_msg);
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 25cd22c1ddee..1a93a39b2aab 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -360,6 +360,7 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
+ struct ip6_tnl *t = netdev_priv(dev);
+ struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
+
++ ip6gre_tunnel_unlink(ign, t);
+ ip6_tnl_dst_reset(netdev_priv(dev));
+ dev_put(dev);
+ }
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 7b5cb003ee22..9524b4596bf5 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -374,6 +374,9 @@ int ip6_forward(struct sk_buff *skb)
+ if (skb->pkt_type != PACKET_HOST)
+ goto drop;
+
++ if (unlikely(skb->sk))
++ goto drop;
++
+ if (skb_warn_if_lro(skb))
+ goto drop;
+
+diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
+index 48f14003af10..14d38ec5e53d 100644
+--- a/net/sunrpc/cache.c
++++ b/net/sunrpc/cache.c
+@@ -1230,7 +1230,7 @@ int qword_get(char **bpp, char *dest, int bufsize)
+ if (bp[0] == '\\' && bp[1] == 'x') {
+ /* HEX STRING */
+ bp += 2;
+- while (len < bufsize) {
++ while (len < bufsize - 1) {
+ int h, l;
+
+ h = hex_to_bin(bp[0]);
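The one-character change above restores the invariant that qword_get() never stores more than bufsize - 1 decoded bytes, leaving room for the NUL terminator the function appends afterwards. A minimal sketch of that bound in isolation, with hypothetical names:

/*
 * Purely illustrative: when a routine both fills 'dest' and NUL-terminates
 * it, at most bufsize - 1 payload bytes may be stored, otherwise the
 * trailing '\0' lands one byte past the buffer.  The real qword_get()
 * decodes \x hex pairs inside this bound rather than copying raw bytes.
 */
static int demo_copy_bounded(char *dest, int bufsize, const char *src, int srclen)
{
	int len = 0;

	if (bufsize <= 0)
		return -1;

	while (len < bufsize - 1 && len < srclen) {
		dest[len] = src[len];
		len++;
	}
	dest[len] = '\0';	/* in bounds: len <= bufsize - 1 */
	return len;
}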
+diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c
+index b9c0910fb8c4..0608f216f359 100644
+--- a/sound/core/control_compat.c
++++ b/sound/core/control_compat.c
+@@ -170,6 +170,19 @@ struct snd_ctl_elem_value32 {
+ unsigned char reserved[128];
+ };
+
++#ifdef CONFIG_X86_X32
++/* x32 has a different alignment for 64bit values from ia32 */
++struct snd_ctl_elem_value_x32 {
++ struct snd_ctl_elem_id id;
++ unsigned int indirect; /* bit-field causes misalignment */
++ union {
++ s32 integer[128];
++ unsigned char data[512];
++ s64 integer64[64];
++ } value;
++ unsigned char reserved[128];
++};
++#endif /* CONFIG_X86_X32 */
+
+ /* get the value type and count of the control */
+ static int get_ctl_type(struct snd_card *card, struct snd_ctl_elem_id *id,
+@@ -219,9 +232,11 @@ static int get_elem_size(int type, int count)
+
+ static int copy_ctl_value_from_user(struct snd_card *card,
+ struct snd_ctl_elem_value *data,
+- struct snd_ctl_elem_value32 __user *data32,
++ void __user *userdata,
++ void __user *valuep,
+ int *typep, int *countp)
+ {
++ struct snd_ctl_elem_value32 __user *data32 = userdata;
+ int i, type, size;
+ int uninitialized_var(count);
+ unsigned int indirect;
+@@ -239,8 +254,9 @@ static int copy_ctl_value_from_user(struct snd_card *card,
+ if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN ||
+ type == SNDRV_CTL_ELEM_TYPE_INTEGER) {
+ for (i = 0; i < count; i++) {
++ s32 __user *intp = valuep;
+ int val;
+- if (get_user(val, &data32->value.integer[i]))
++ if (get_user(val, &intp[i]))
+ return -EFAULT;
+ data->value.integer.value[i] = val;
+ }
+@@ -250,8 +266,7 @@ static int copy_ctl_value_from_user(struct snd_card *card,
+ dev_err(card->dev, "snd_ioctl32_ctl_elem_value: unknown type %d\n", type);
+ return -EINVAL;
+ }
+- if (copy_from_user(data->value.bytes.data,
+- data32->value.data, size))
++ if (copy_from_user(data->value.bytes.data, valuep, size))
+ return -EFAULT;
+ }
+
+@@ -261,7 +276,8 @@ static int copy_ctl_value_from_user(struct snd_card *card,
+ }
+
+ /* restore the value to 32bit */
+-static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32,
++static int copy_ctl_value_to_user(void __user *userdata,
++ void __user *valuep,
+ struct snd_ctl_elem_value *data,
+ int type, int count)
+ {
+@@ -270,22 +286,22 @@ static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32,
+ if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN ||
+ type == SNDRV_CTL_ELEM_TYPE_INTEGER) {
+ for (i = 0; i < count; i++) {
++ s32 __user *intp = valuep;
+ int val;
+ val = data->value.integer.value[i];
+- if (put_user(val, &data32->value.integer[i]))
++ if (put_user(val, &intp[i]))
+ return -EFAULT;
+ }
+ } else {
+ size = get_elem_size(type, count);
+- if (copy_to_user(data32->value.data,
+- data->value.bytes.data, size))
++ if (copy_to_user(valuep, data->value.bytes.data, size))
+ return -EFAULT;
+ }
+ return 0;
+ }
+
+-static int snd_ctl_elem_read_user_compat(struct snd_card *card,
+- struct snd_ctl_elem_value32 __user *data32)
++static int ctl_elem_read_user(struct snd_card *card,
++ void __user *userdata, void __user *valuep)
+ {
+ struct snd_ctl_elem_value *data;
+ int err, type, count;
+@@ -294,7 +310,9 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card,
+ if (data == NULL)
+ return -ENOMEM;
+
+- if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0)
++ err = copy_ctl_value_from_user(card, data, userdata, valuep,
++ &type, &count);
++ if (err < 0)
+ goto error;
+
+ snd_power_lock(card);
+@@ -303,14 +321,15 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card,
+ err = snd_ctl_elem_read(card, data);
+ snd_power_unlock(card);
+ if (err >= 0)
+- err = copy_ctl_value_to_user(data32, data, type, count);
++ err = copy_ctl_value_to_user(userdata, valuep, data,
++ type, count);
+ error:
+ kfree(data);
+ return err;
+ }
+
+-static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
+- struct snd_ctl_elem_value32 __user *data32)
++static int ctl_elem_write_user(struct snd_ctl_file *file,
++ void __user *userdata, void __user *valuep)
+ {
+ struct snd_ctl_elem_value *data;
+ struct snd_card *card = file->card;
+@@ -320,7 +339,9 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
+ if (data == NULL)
+ return -ENOMEM;
+
+- if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0)
++ err = copy_ctl_value_from_user(card, data, userdata, valuep,
++ &type, &count);
++ if (err < 0)
+ goto error;
+
+ snd_power_lock(card);
+@@ -329,12 +350,39 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
+ err = snd_ctl_elem_write(card, file, data);
+ snd_power_unlock(card);
+ if (err >= 0)
+- err = copy_ctl_value_to_user(data32, data, type, count);
++ err = copy_ctl_value_to_user(userdata, valuep, data,
++ type, count);
+ error:
+ kfree(data);
+ return err;
+ }
+
++static int snd_ctl_elem_read_user_compat(struct snd_card *card,
++ struct snd_ctl_elem_value32 __user *data32)
++{
++ return ctl_elem_read_user(card, data32, &data32->value);
++}
++
++static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
++ struct snd_ctl_elem_value32 __user *data32)
++{
++ return ctl_elem_write_user(file, data32, &data32->value);
++}
++
++#ifdef CONFIG_X86_X32
++static int snd_ctl_elem_read_user_x32(struct snd_card *card,
++ struct snd_ctl_elem_value_x32 __user *data32)
++{
++ return ctl_elem_read_user(card, data32, &data32->value);
++}
++
++static int snd_ctl_elem_write_user_x32(struct snd_ctl_file *file,
++ struct snd_ctl_elem_value_x32 __user *data32)
++{
++ return ctl_elem_write_user(file, data32, &data32->value);
++}
++#endif /* CONFIG_X86_X32 */
++
+ /* add or replace a user control */
+ static int snd_ctl_elem_add_compat(struct snd_ctl_file *file,
+ struct snd_ctl_elem_info32 __user *data32,
+@@ -393,6 +441,10 @@ enum {
+ SNDRV_CTL_IOCTL_ELEM_WRITE32 = _IOWR('U', 0x13, struct snd_ctl_elem_value32),
+ SNDRV_CTL_IOCTL_ELEM_ADD32 = _IOWR('U', 0x17, struct snd_ctl_elem_info32),
+ SNDRV_CTL_IOCTL_ELEM_REPLACE32 = _IOWR('U', 0x18, struct snd_ctl_elem_info32),
++#ifdef CONFIG_X86_X32
++ SNDRV_CTL_IOCTL_ELEM_READ_X32 = _IOWR('U', 0x12, struct snd_ctl_elem_value_x32),
++ SNDRV_CTL_IOCTL_ELEM_WRITE_X32 = _IOWR('U', 0x13, struct snd_ctl_elem_value_x32),
++#endif /* CONFIG_X86_X32 */
+ };
+
+ static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
+@@ -431,6 +483,12 @@ static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, uns
+ return snd_ctl_elem_add_compat(ctl, argp, 0);
+ case SNDRV_CTL_IOCTL_ELEM_REPLACE32:
+ return snd_ctl_elem_add_compat(ctl, argp, 1);
++#ifdef CONFIG_X86_X32
++ case SNDRV_CTL_IOCTL_ELEM_READ_X32:
++ return snd_ctl_elem_read_user_x32(ctl->card, argp);
++ case SNDRV_CTL_IOCTL_ELEM_WRITE_X32:
++ return snd_ctl_elem_write_user_x32(ctl, argp);
++#endif /* CONFIG_X86_X32 */
+ }
+
+ down_read(&snd_ioctl_rwsem);
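The extra *_X32 ioctl numbers above work because _IOWR() folds the size of its argument type into the command value: the x32 layout of the element-value structure differs from the ia32 one since 64-bit members get 8-byte alignment on x32, so the same 'U'/0x12 request decodes to a different number and needs its own switch case. A tiny illustration with made-up structures:

#include <linux/ioctl.h>

/*
 * Hypothetical stand-ins: all that matters is that the two layouts have a
 * different sizeof(), e.g. because x32 pads for 8-byte alignment of the
 * 64-bit union member while ia32 does not.
 */
struct demo_elem_ia32 {
	unsigned int indirect;
	unsigned char value[512];
};

struct demo_elem_x32 {
	unsigned int indirect;
	unsigned int pad;		/* alignment of the 64-bit union on x32 */
	unsigned char value[512];
};

/*
 * _IOWR() encodes sizeof(type) into the command number, so the same
 * 'U'/0x12 pair yields two distinct ioctl values; that is why the compat
 * handler can, and must, dispatch the x32 variants as separate cases.
 */
#define DEMO_ELEM_READ_IA32	_IOWR('U', 0x12, struct demo_elem_ia32)
#define DEMO_ELEM_READ_X32	_IOWR('U', 0x12, struct demo_elem_x32)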
+diff --git a/sound/core/rawmidi_compat.c b/sound/core/rawmidi_compat.c
+index 5268c1f58c25..09a89094dcf7 100644
+--- a/sound/core/rawmidi_compat.c
++++ b/sound/core/rawmidi_compat.c
+@@ -94,9 +94,58 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile,
+ return 0;
+ }
+
++#ifdef CONFIG_X86_X32
++/* X32 ABI has 64bit timespec and 64bit alignment */
++struct snd_rawmidi_status_x32 {
++ s32 stream;
++ u32 rsvd; /* alignment */
++ struct timespec tstamp;
++ u32 avail;
++ u32 xruns;
++ unsigned char reserved[16];
++} __attribute__((packed));
++
++#define put_timespec(src, dst) copy_to_user(dst, src, sizeof(*dst))
++
++static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile,
++ struct snd_rawmidi_status_x32 __user *src)
++{
++ int err;
++ struct snd_rawmidi_status status;
++
++ if (rfile->output == NULL)
++ return -EINVAL;
++ if (get_user(status.stream, &src->stream))
++ return -EFAULT;
++
++ switch (status.stream) {
++ case SNDRV_RAWMIDI_STREAM_OUTPUT:
++ err = snd_rawmidi_output_status(rfile->output, &status);
++ break;
++ case SNDRV_RAWMIDI_STREAM_INPUT:
++ err = snd_rawmidi_input_status(rfile->input, &status);
++ break;
++ default:
++ return -EINVAL;
++ }
++ if (err < 0)
++ return err;
++
++ if (put_timespec(&status.tstamp, &src->tstamp) ||
++ put_user(status.avail, &src->avail) ||
++ put_user(status.xruns, &src->xruns))
++ return -EFAULT;
++
++ return 0;
++}
++#endif /* CONFIG_X86_X32 */
++
+ enum {
+ SNDRV_RAWMIDI_IOCTL_PARAMS32 = _IOWR('W', 0x10, struct snd_rawmidi_params32),
+ SNDRV_RAWMIDI_IOCTL_STATUS32 = _IOWR('W', 0x20, struct snd_rawmidi_status32),
++#ifdef CONFIG_X86_X32
++ SNDRV_RAWMIDI_IOCTL_STATUS_X32 = _IOWR('W', 0x20, struct snd_rawmidi_status_x32),
++#endif /* CONFIG_X86_X32 */
+ };
+
+ static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
+@@ -115,6 +164,10 @@ static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsign
+ return snd_rawmidi_ioctl_params_compat(rfile, argp);
+ case SNDRV_RAWMIDI_IOCTL_STATUS32:
+ return snd_rawmidi_ioctl_status_compat(rfile, argp);
++#ifdef CONFIG_X86_X32
++ case SNDRV_RAWMIDI_IOCTL_STATUS_X32:
++ return snd_rawmidi_ioctl_status_x32(rfile, argp);
++#endif /* CONFIG_X86_X32 */
+ }
+ return -ENOIOCTLCMD;
+ }
+diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c
+index 16d42679e43f..bb032d7593e3 100644
+--- a/sound/core/seq/oss/seq_oss.c
++++ b/sound/core/seq/oss/seq_oss.c
+@@ -144,8 +144,6 @@ odev_release(struct inode *inode, struct file *file)
+ if ((dp = file->private_data) == NULL)
+ return 0;
+
+- snd_seq_oss_drain_write(dp);
+-
+ mutex_lock(&register_mutex);
+ snd_seq_oss_release(dp);
+ mutex_unlock(&register_mutex);
+diff --git a/sound/core/seq/oss/seq_oss_device.h b/sound/core/seq/oss/seq_oss_device.h
+index b43924325249..d7b4d016b547 100644
+--- a/sound/core/seq/oss/seq_oss_device.h
++++ b/sound/core/seq/oss/seq_oss_device.h
+@@ -127,7 +127,6 @@ int snd_seq_oss_write(struct seq_oss_devinfo *dp, const char __user *buf, int co
+ unsigned int snd_seq_oss_poll(struct seq_oss_devinfo *dp, struct file *file, poll_table * wait);
+
+ void snd_seq_oss_reset(struct seq_oss_devinfo *dp);
+-void snd_seq_oss_drain_write(struct seq_oss_devinfo *dp);
+
+ /* */
+ void snd_seq_oss_process_queue(struct seq_oss_devinfo *dp, abstime_t time);
+diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c
+index beea8c861f49..acefecb1a47a 100644
+--- a/sound/core/seq/oss/seq_oss_init.c
++++ b/sound/core/seq/oss/seq_oss_init.c
+@@ -441,22 +441,6 @@ snd_seq_oss_release(struct seq_oss_devinfo *dp)
+
+
+ /*
+- * Wait until the queue is empty (if we don't have nonblock)
+- */
+-void
+-snd_seq_oss_drain_write(struct seq_oss_devinfo *dp)
+-{
+- if (! dp->timer->running)
+- return;
+- if (is_write_mode(dp->file_mode) && !is_nonblock_mode(dp->file_mode) &&
+- dp->writeq) {
+- while (snd_seq_oss_writeq_sync(dp->writeq))
+- ;
+- }
+-}
+-
+-
+-/*
+ * reset sequencer devices
+ */
+ void
+diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c
+index e05802ae6e1b..2e908225d754 100644
+--- a/sound/core/timer_compat.c
++++ b/sound/core/timer_compat.c
+@@ -70,13 +70,14 @@ static int snd_timer_user_status_compat(struct file *file,
+ struct snd_timer_status32 __user *_status)
+ {
+ struct snd_timer_user *tu;
+- struct snd_timer_status status;
++ struct snd_timer_status32 status;
+
+ tu = file->private_data;
+ if (snd_BUG_ON(!tu->timeri))
+ return -ENXIO;
+ memset(&status, 0, sizeof(status));
+- status.tstamp = tu->tstamp;
++ status.tstamp.tv_sec = tu->tstamp.tv_sec;
++ status.tstamp.tv_nsec = tu->tstamp.tv_nsec;
+ status.resolution = snd_timer_resolution(tu->timeri);
+ status.lost = tu->timeri->lost;
+ status.overrun = tu->overrun;
+@@ -88,12 +89,21 @@ static int snd_timer_user_status_compat(struct file *file,
+ return 0;
+ }
+
++#ifdef CONFIG_X86_X32
++/* X32 ABI has the same struct as x86-64 */
++#define snd_timer_user_status_x32(file, s) \
++ snd_timer_user_status(file, s)
++#endif /* CONFIG_X86_X32 */
++
+ /*
+ */
+
+ enum {
+ SNDRV_TIMER_IOCTL_INFO32 = _IOR('T', 0x11, struct snd_timer_info32),
+ SNDRV_TIMER_IOCTL_STATUS32 = _IOW('T', 0x14, struct snd_timer_status32),
++#ifdef CONFIG_X86_X32
++ SNDRV_TIMER_IOCTL_STATUS_X32 = _IOW('T', 0x14, struct snd_timer_status),
++#endif /* CONFIG_X86_X32 */
+ };
+
+ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
+@@ -122,6 +132,10 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns
+ return snd_timer_user_info_compat(file, argp);
+ case SNDRV_TIMER_IOCTL_STATUS32:
+ return snd_timer_user_status_compat(file, argp);
++#ifdef CONFIG_X86_X32
++ case SNDRV_TIMER_IOCTL_STATUS_X32:
++ return snd_timer_user_status_x32(file, argp);
++#endif /* CONFIG_X86_X32 */
+ }
+ return -ENOIOCTLCMD;
+ }
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index f9f929d5130a..0405e9753c04 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -29,6 +29,7 @@
+ #include <linux/pci.h>
+ #include <linux/dmi.h>
+ #include <linux/module.h>
++#include <linux/input.h>
+ #include <sound/core.h>
+ #include <sound/jack.h>
+ #include "hda_codec.h"
+@@ -96,6 +97,8 @@ struct alc_spec {
+ hda_nid_t cap_mute_led_nid;
+
+ unsigned int gpio_led; /* used for alc269_fixup_hp_gpio_led() */
++ unsigned int gpio_mute_led_mask;
++ unsigned int gpio_mic_led_mask;
+
+ hda_nid_t headset_mic_pin;
+ hda_nid_t headphone_mic_pin;
+@@ -118,6 +121,9 @@ struct alc_spec {
+ hda_nid_t pll_nid;
+ unsigned int pll_coef_idx, pll_coef_bit;
+ unsigned int coef0;
++#if IS_ENABLED(CONFIG_INPUT)
++ struct input_dev *kb_dev;
++#endif
+ };
+
+ /*
+@@ -3322,41 +3328,45 @@ static void alc269_fixup_hp_mute_led_mic2(struct hda_codec *codec,
+ }
+ }
+
+-/* turn on/off mute LED per vmaster hook */
+-static void alc269_fixup_hp_gpio_mute_hook(void *private_data, int enabled)
++/* update LED status via GPIO */
++static void alc_update_gpio_led(struct hda_codec *codec, unsigned int mask,
++ bool enabled)
+ {
+- struct hda_codec *codec = private_data;
+ struct alc_spec *spec = codec->spec;
+ unsigned int oldval = spec->gpio_led;
+
++ if (spec->mute_led_polarity)
++ enabled = !enabled;
++
+ if (enabled)
+- spec->gpio_led &= ~0x08;
++ spec->gpio_led &= ~mask;
+ else
+- spec->gpio_led |= 0x08;
++ spec->gpio_led |= mask;
+ if (spec->gpio_led != oldval)
+ snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
+ spec->gpio_led);
+ }
+
+-/* turn on/off mic-mute LED per capture hook */
+-static void alc269_fixup_hp_gpio_mic_mute_hook(struct hda_codec *codec,
+- struct snd_kcontrol *kcontrol,
+- struct snd_ctl_elem_value *ucontrol)
++/* turn on/off mute LED via GPIO per vmaster hook */
++static void alc_fixup_gpio_mute_hook(void *private_data, int enabled)
+ {
++ struct hda_codec *codec = private_data;
+ struct alc_spec *spec = codec->spec;
+- unsigned int oldval = spec->gpio_led;
+
+- if (!ucontrol)
+- return;
++ alc_update_gpio_led(codec, spec->gpio_mute_led_mask, enabled);
++}
+
+- if (ucontrol->value.integer.value[0] ||
+- ucontrol->value.integer.value[1])
+- spec->gpio_led &= ~0x10;
+- else
+- spec->gpio_led |= 0x10;
+- if (spec->gpio_led != oldval)
+- snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
+- spec->gpio_led);
++/* turn on/off mic-mute LED via GPIO per capture hook */
++static void alc_fixup_gpio_mic_mute_hook(struct hda_codec *codec,
++ struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *ucontrol)
++{
++ struct alc_spec *spec = codec->spec;
++
++ if (ucontrol)
++ alc_update_gpio_led(codec, spec->gpio_mic_led_mask,
++ ucontrol->value.integer.value[0] ||
++ ucontrol->value.integer.value[1]);
+ }
+
+ static void alc269_fixup_hp_gpio_led(struct hda_codec *codec,
+@@ -3370,9 +3380,33 @@ static void alc269_fixup_hp_gpio_led(struct hda_codec *codec,
+ };
+
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+- spec->gen.vmaster_mute.hook = alc269_fixup_hp_gpio_mute_hook;
+- spec->gen.cap_sync_hook = alc269_fixup_hp_gpio_mic_mute_hook;
++ spec->gen.vmaster_mute.hook = alc_fixup_gpio_mute_hook;
++ spec->gen.cap_sync_hook = alc_fixup_gpio_mic_mute_hook;
+ spec->gpio_led = 0;
++ spec->mute_led_polarity = 0;
++ spec->gpio_mute_led_mask = 0x08;
++ spec->gpio_mic_led_mask = 0x10;
++ snd_hda_add_verbs(codec, gpio_init);
++ }
++}
++
++static void alc286_fixup_hp_gpio_led(struct hda_codec *codec,
++ const struct hda_fixup *fix, int action)
++{
++ struct alc_spec *spec = codec->spec;
++ static const struct hda_verb gpio_init[] = {
++ { 0x01, AC_VERB_SET_GPIO_MASK, 0x22 },
++ { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x22 },
++ {}
++ };
++
++ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++ spec->gen.vmaster_mute.hook = alc_fixup_gpio_mute_hook;
++ spec->gen.cap_sync_hook = alc_fixup_gpio_mic_mute_hook;
++ spec->gpio_led = 0;
++ spec->mute_led_polarity = 0;
++ spec->gpio_mute_led_mask = 0x02;
++ spec->gpio_mic_led_mask = 0x20;
+ snd_hda_add_verbs(codec, gpio_init);
+ }
+ }
+@@ -3414,9 +3448,11 @@ static void alc269_fixup_hp_gpio_mic1_led(struct hda_codec *codec,
+ };
+
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+- spec->gen.vmaster_mute.hook = alc269_fixup_hp_gpio_mute_hook;
++ spec->gen.vmaster_mute.hook = alc_fixup_gpio_mute_hook;
+ spec->gen.cap_sync_hook = alc269_fixup_hp_cap_mic_mute_hook;
+ spec->gpio_led = 0;
++ spec->mute_led_polarity = 0;
++ spec->gpio_mute_led_mask = 0x08;
+ spec->cap_mute_led_nid = 0x18;
+ snd_hda_add_verbs(codec, gpio_init);
+ codec->power_filter = led_power_filter;
+@@ -3435,15 +3471,147 @@ static void alc280_fixup_hp_gpio4(struct hda_codec *codec,
+ };
+
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+- spec->gen.vmaster_mute.hook = alc269_fixup_hp_gpio_mute_hook;
++ spec->gen.vmaster_mute.hook = alc_fixup_gpio_mute_hook;
+ spec->gen.cap_sync_hook = alc269_fixup_hp_cap_mic_mute_hook;
+ spec->gpio_led = 0;
++ spec->mute_led_polarity = 0;
++ spec->gpio_mute_led_mask = 0x08;
+ spec->cap_mute_led_nid = 0x18;
+ snd_hda_add_verbs(codec, gpio_init);
+ codec->power_filter = led_power_filter;
+ }
+ }
+
++#if IS_ENABLED(CONFIG_INPUT)
++static void gpio2_mic_hotkey_event(struct hda_codec *codec,
++ struct hda_jack_callback *event)
++{
++ struct alc_spec *spec = codec->spec;
++
++ /* GPIO2 just toggles on a keypress/keyrelease cycle. Therefore
++ send both key-on and key-off events for every interrupt. */
++ input_report_key(spec->kb_dev, KEY_MICMUTE, 1);
++ input_sync(spec->kb_dev);
++ input_report_key(spec->kb_dev, KEY_MICMUTE, 0);
++ input_sync(spec->kb_dev);
++}
++#endif
++
++static int alc_register_micmute_input_device(struct hda_codec *codec)
++{
++ struct alc_spec *spec = codec->spec;
++
++ spec->kb_dev = input_allocate_device();
++ if (!spec->kb_dev) {
++ codec_err(codec, "Out of memory (input_allocate_device)\n");
++ return -ENOMEM;
++ }
++ spec->kb_dev->name = "Microphone Mute Button";
++ spec->kb_dev->evbit[0] = BIT_MASK(EV_KEY);
++ spec->kb_dev->keybit[BIT_WORD(KEY_MICMUTE)] = BIT_MASK(KEY_MICMUTE);
++
++ if (input_register_device(spec->kb_dev)) {
++ codec_err(codec, "input_register_device failed\n");
++ input_free_device(spec->kb_dev);
++ spec->kb_dev = NULL;
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++static void alc280_fixup_hp_gpio2_mic_hotkey(struct hda_codec *codec,
++ const struct hda_fixup *fix, int action)
++{
++#if IS_ENABLED(CONFIG_INPUT)
++ /* GPIO1 = set according to SKU external amp
++ GPIO2 = mic mute hotkey
++ GPIO3 = mute LED
++ GPIO4 = mic mute LED */
++ static const struct hda_verb gpio_init[] = {
++ { 0x01, AC_VERB_SET_GPIO_MASK, 0x1e },
++ { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x1a },
++ { 0x01, AC_VERB_SET_GPIO_DATA, 0x02 },
++ {}
++ };
++
++ struct alc_spec *spec = codec->spec;
++
++ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++ if (alc_register_micmute_input_device(codec) != 0)
++ return;
++
++ snd_hda_add_verbs(codec, gpio_init);
++ snd_hda_codec_write_cache(codec, codec->afg, 0,
++ AC_VERB_SET_GPIO_UNSOLICITED_RSP_MASK, 0x04);
++ snd_hda_jack_detect_enable_callback(codec, codec->afg,
++ gpio2_mic_hotkey_event);
++
++ spec->gen.vmaster_mute.hook = alc_fixup_gpio_mute_hook;
++ spec->gen.cap_sync_hook = alc_fixup_gpio_mic_mute_hook;
++ spec->gpio_led = 0;
++ spec->mute_led_polarity = 0;
++ spec->gpio_mute_led_mask = 0x08;
++ spec->gpio_mic_led_mask = 0x10;
++ return;
++ }
++
++ if (!spec->kb_dev)
++ return;
++
++ switch (action) {
++ case HDA_FIXUP_ACT_PROBE:
++ spec->init_amp = ALC_INIT_DEFAULT;
++ break;
++ case HDA_FIXUP_ACT_FREE:
++ input_unregister_device(spec->kb_dev);
++ input_free_device(spec->kb_dev);
++ spec->kb_dev = NULL;
++ }
++#endif
++}
++
++static void alc233_fixup_lenovo_line2_mic_hotkey(struct hda_codec *codec,
++ const struct hda_fixup *fix, int action)
++{
++ /* Line2 = mic mute hotkey
++ GPIO2 = mic mute LED */
++ static const struct hda_verb gpio_init[] = {
++ { 0x01, AC_VERB_SET_GPIO_MASK, 0x04 },
++ { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x04 },
++ {}
++ };
++
++ struct alc_spec *spec = codec->spec;
++
++ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++ if (alc_register_micmute_input_device(codec) != 0)
++ return;
++
++ snd_hda_add_verbs(codec, gpio_init);
++ snd_hda_jack_detect_enable_callback(codec, 0x1b,
++ gpio2_mic_hotkey_event);
++
++ spec->gen.cap_sync_hook = alc_fixup_gpio_mic_mute_hook;
++ spec->gpio_led = 0;
++ spec->mute_led_polarity = 0;
++ spec->gpio_mic_led_mask = 0x04;
++ return;
++ }
++
++ if (!spec->kb_dev)
++ return;
++
++ switch (action) {
++ case HDA_FIXUP_ACT_PROBE:
++ spec->init_amp = ALC_INIT_DEFAULT;
++ break;
++ case HDA_FIXUP_ACT_FREE:
++ input_unregister_device(spec->kb_dev);
++ spec->kb_dev = NULL;
++ }
++}
++
+ static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+ {
+@@ -3629,6 +3797,10 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
+
+ static void alc_headset_mode_default(struct hda_codec *codec)
+ {
++ static struct coef_fw coef0225[] = {
++ UPDATE_COEF(0x45, 0x3f<<10, 0x34<<10),
++ {}
++ };
+ static struct coef_fw coef0255[] = {
+ WRITE_COEF(0x45, 0xc089),
+ WRITE_COEF(0x45, 0xc489),
+@@ -3670,6 +3842,9 @@ static void alc_headset_mode_default(struct hda_codec *codec)
+ };
+
+ switch (codec->vendor_id) {
++ case 0x10ec0225:
++ alc_process_coef_fw(codec, coef0225);
++ break;
+ case 0x10ec0255:
+ case 0x10ec0256:
+ alc_process_coef_fw(codec, coef0255);
+@@ -4074,6 +4249,29 @@ static void alc_fixup_headset_mode_alc255_no_hp_mic(struct hda_codec *codec,
+ alc_fixup_headset_mode(codec, fix, action);
+ }
+
++static void alc288_update_headset_jack_cb(struct hda_codec *codec,
++ struct hda_jack_callback *jack)
++{
++ struct alc_spec *spec = codec->spec;
++ int present;
++
++ alc_update_headset_jack_cb(codec, jack);
++ /* Headset Mic enable or disable, only for Dell Dino */
++ present = spec->gen.hp_jack_present ? 0x40 : 0;
++ snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
++ present);
++}
++
++static void alc_fixup_headset_mode_dell_alc288(struct hda_codec *codec,
++ const struct hda_fixup *fix, int action)
++{
++ alc_fixup_headset_mode(codec, fix, action);
++ if (action == HDA_FIXUP_ACT_PROBE) {
++ struct alc_spec *spec = codec->spec;
++ spec->gen.hp_automute_hook = alc288_update_headset_jack_cb;
++ }
++}
++
+ static void alc_fixup_auto_mute_via_amp(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+ {
+@@ -4431,6 +4629,20 @@ enum {
+ ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED,
+ ALC282_FIXUP_ASPIRE_V5_PINS,
+ ALC280_FIXUP_HP_GPIO4,
++ ALC286_FIXUP_HP_GPIO_LED,
++ ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY,
++ ALC280_FIXUP_HP_DOCK_PINS,
++ ALC288_FIXUP_DELL_HEADSET_MODE,
++ ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
++ ALC288_FIXUP_DELL_XPS_13_GPIO6,
++ ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
++ ALC275_FIXUP_DELL_XPS,
++ ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
++ ALC293_FIXUP_LENOVO_SPK_NOISE,
++ ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
++ ALC255_FIXUP_DELL_SPK_NOISE,
++ ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
++ ALC280_FIXUP_HP_HEADSET_MIC,
+ };
+
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -4926,6 +5138,117 @@ static const struct hda_fixup alc269_fixups[] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc280_fixup_hp_gpio4,
+ },
++ [ALC286_FIXUP_HP_GPIO_LED] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc286_fixup_hp_gpio_led,
++ },
++ [ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc280_fixup_hp_gpio2_mic_hotkey,
++ },
++ [ALC280_FIXUP_HP_DOCK_PINS] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x1b, 0x21011020 }, /* line-out */
++ { 0x1a, 0x01a1903c }, /* headset mic */
++ { 0x18, 0x2181103f }, /* line-in */
++ { },
++ },
++ .chained = true,
++ .chain_id = ALC280_FIXUP_HP_GPIO4
++ },
++ [ALC288_FIXUP_DELL_HEADSET_MODE] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc_fixup_headset_mode_dell_alc288,
++ .chained = true,
++ .chain_id = ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED
++ },
++ [ALC288_FIXUP_DELL1_MIC_NO_PRESENCE] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */
++ { 0x1a, 0x01a1913d }, /* use as headphone mic, without its own jack detect */
++ { }
++ },
++ .chained = true,
++ .chain_id = ALC288_FIXUP_DELL_HEADSET_MODE
++ },
++ [ALC288_FIXUP_DELL_XPS_13_GPIO6] = {
++ .type = HDA_FIXUP_VERBS,
++ .v.verbs = (const struct hda_verb[]) {
++ {0x01, AC_VERB_SET_GPIO_MASK, 0x40},
++ {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x40},
++ {0x01, AC_VERB_SET_GPIO_DATA, 0x00},
++ { }
++ },
++ .chained = true,
++ .chain_id = ALC288_FIXUP_DELL1_MIC_NO_PRESENCE
++ },
++ [ALC298_FIXUP_DELL1_MIC_NO_PRESENCE] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */
++ { 0x1a, 0x01a1913d }, /* use as headphone mic, without its own jack detect */
++ { }
++ },
++ .chained = true,
++ .chain_id = ALC269_FIXUP_HEADSET_MODE
++ },
++ [ALC275_FIXUP_DELL_XPS] = {
++ .type = HDA_FIXUP_VERBS,
++ .v.verbs = (const struct hda_verb[]) {
++ /* Enables internal speaker */
++ {0x20, AC_VERB_SET_COEF_INDEX, 0x1f},
++ {0x20, AC_VERB_SET_PROC_COEF, 0x00c0},
++ {0x20, AC_VERB_SET_COEF_INDEX, 0x30},
++ {0x20, AC_VERB_SET_PROC_COEF, 0x00b1},
++ {}
++ }
++ },
++ [ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE] = {
++ .type = HDA_FIXUP_VERBS,
++ .v.verbs = (const struct hda_verb[]) {
++ /* Disable pass-through path for FRONT 14h */
++ {0x20, AC_VERB_SET_COEF_INDEX, 0x36},
++ {0x20, AC_VERB_SET_PROC_COEF, 0x1737},
++ {}
++ },
++ .chained = true,
++ .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
++ },
++ [ALC293_FIXUP_LENOVO_SPK_NOISE] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc_fixup_disable_aamix,
++ .chained = true,
++ .chain_id = ALC269_FIXUP_THINKPAD_ACPI
++ },
++ [ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc233_fixup_lenovo_line2_mic_hotkey,
++ },
++ [ALC255_FIXUP_DELL_SPK_NOISE] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc_fixup_disable_aamix,
++ .chained = true,
++ .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
++ },
++ [ALC225_FIXUP_DELL1_MIC_NO_PRESENCE] = {
++ .type = HDA_FIXUP_VERBS,
++ .v.verbs = (const struct hda_verb[]) {
++ /* Disable pass-through path for FRONT 14h */
++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x36 },
++ { 0x20, AC_VERB_SET_PROC_COEF, 0x57d7 },
++ {}
++ },
++ .chained = true,
++ .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
++ },
++ [ALC280_FIXUP_HP_HEADSET_MIC] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc_fixup_disable_aamix,
++ .chained = true,
++ .chain_id = ALC269_FIXUP_HEADSET_MIC,
++ },
+ };
+
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
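Several of the new fixup entries above chain into existing ones through .chained/.chain_id; for example ALC288_FIXUP_DELL_XPS_13_GPIO6 ends up pulling in the Dell headset-mode and WMI mute-LED fixups as well. A minimal sketch of how such a chain is walked, with mock types standing in for the real hda_fixup machinery:

#include <stdio.h>

/* Mock of the chained-fixup idea: an entry may name a follow-up entry,
 * and applying a fixup walks the chain until chained == 0. Types and
 * names here are illustrative, not the real HDA structs.
 */
struct mock_fixup {
	const char *name;
	int chained;   /* non-zero: also apply chain_id */
	int chain_id;  /* index of the next fixup in the table */
};

enum { FIX_GPIO6, FIX_MIC_NO_PRESENCE, FIX_HEADSET_MODE, FIX_MIC_MUTE_LED };

static const struct mock_fixup table[] = {
	[FIX_GPIO6]           = { "dell-xps13-gpio6", 1, FIX_MIC_NO_PRESENCE },
	[FIX_MIC_NO_PRESENCE] = { "dell1-mic-no-presence", 1, FIX_HEADSET_MODE },
	[FIX_HEADSET_MODE]    = { "dell-headset-mode", 1, FIX_MIC_MUTE_LED },
	[FIX_MIC_MUTE_LED]    = { "dell-wmi-mic-mute-led", 0, 0 },
};

static void apply_fixup(int id)
{
	for (;;) {
		printf("applying %s\n", table[id].name);
		if (!table[id].chained)
			break;
		id = table[id].chain_id;
	}
}

int main(void)
{
	apply_fixup(FIX_GPIO6); /* walks all four entries in order */
	return 0;
}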
+@@ -4937,9 +5260,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
+ SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
++ SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
+ SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
+ SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
+ SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
++ SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
+ SND_PCI_QUIRK(0x1028, 0x05da, "Dell Vostro 5460", ALC290_FIXUP_SUBWOOFER),
+ SND_PCI_QUIRK(0x1028, 0x05f4, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x05f5, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+@@ -4952,11 +5277,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
++ SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
+ SND_PCI_QUIRK(0x103c, 0x18e6, "HP", ALC269_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED),
++ SND_PCI_QUIRK(0x103c, 0x225f, "HP", ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY),
+ /* ALC282 */
+ SND_PCI_QUIRK(0x103c, 0x21f9, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2210, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+@@ -4970,6 +5298,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x226a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x226b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x226e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
++ SND_PCI_QUIRK(0x103c, 0x2271, "HP", ALC286_FIXUP_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC280_FIXUP_HP_DOCK_PINS),
+ SND_PCI_QUIRK(0x103c, 0x229e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22b2, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+@@ -5007,6 +5337,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x2335, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
++ SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+ SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+@@ -5053,6 +5384,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
++ SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
++ SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
+ SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
+ SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+@@ -5062,6 +5395,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
++ SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
+ SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+ SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
+@@ -5142,6 +5476,9 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ {.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"},
+ {}
+ };
++#define ALC225_STANDARD_PINS \
++ {0x12, 0xb7a60130}, \
++ {0x21, 0x04211020}
+
+ #define ALC255_STANDARD_PINS \
+ {0x18, 0x411111f0}, \
+@@ -5167,6 +5504,13 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ {0x1b, 0x411111f0}, \
+ {0x1e, 0x411111f0}
+
++#define ALC288_STANDARD_PINS \
++ {0x17, 0x411111f0}, \
++ {0x18, 0x411111f0}, \
++ {0x19, 0x411111f0}, \
++ {0x1a, 0x411111f0}, \
++ {0x1e, 0x411111f0}
++
+ #define ALC290_STANDARD_PINS \
+ {0x12, 0x99a30130}, \
+ {0x13, 0x40000000}, \
+@@ -5184,7 +5528,26 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ {0x1d, 0x40700001}, \
+ {0x1e, 0x411111f0}
+
++#define ALC298_STANDARD_PINS \
++ {0x18, 0x411111f0}, \
++ {0x19, 0x411111f0}, \
++ {0x1a, 0x411111f0}, \
++ {0x1e, 0x411111f0}, \
++ {0x1f, 0x411111f0}
++
+ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
++ SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
++ ALC225_STANDARD_PINS,
++ {0x14, 0x901701a0}),
++ SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
++ ALC225_STANDARD_PINS,
++ {0x14, 0x901701b0}),
++ SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
++ ALC225_STANDARD_PINS,
++ {0x14, 0x901701a0}),
++ SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
++ ALC225_STANDARD_PINS,
++ {0x14, 0x901701b0}),
+ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
+ ALC255_STANDARD_PINS,
+ {0x12, 0x40300000},
+@@ -5365,6 +5728,13 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ {0x19, 0x03a11020},
+ {0x1d, 0x40e00001},
+ {0x21, 0x0321101f}),
++ SND_HDA_PIN_QUIRK(0x10ec0288, 0x1028, "Dell", ALC288_FIXUP_DELL_XPS_13_GPIO6,
++ ALC288_STANDARD_PINS,
++ {0x12, 0x90a60120},
++ {0x13, 0x40000000},
++ {0x14, 0x90170110},
++ {0x1d, 0x4076832d},
++ {0x21, 0x0321101f}),
+ SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
+ ALC290_STANDARD_PINS,
+ {0x14, 0x411111f0},
+@@ -5449,6 +5819,14 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ {0x16, 0x411111f0},
+ {0x18, 0x411111f0},
+ {0x19, 0x411111f0}),
++ SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
++ ALC298_STANDARD_PINS,
++ {0x12, 0x90a60130},
++ {0x13, 0x40000000},
++ {0x14, 0x411111f0},
++ {0x17, 0x90170140},
++ {0x1d, 0x4068a36d},
++ {0x21, 0x03211020}),
+ {}
+ };
+
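The SND_HDA_PIN_QUIRK rows added above select a fixup by codec ID, subsystem vendor and the listed pin default configurations; the four ALC225 entries differ only in the pin 0x14 value, which is how the two speaker variants are told apart. A rough sketch of the match-all-listed-pins idea (the real matcher lives in the HDA core and is more involved):

#include <stdio.h>

/* Mock of pin-based quirk matching: an entry applies only if every
 * (pin, default-config) pair it lists agrees with what the BIOS reports.
 * Data below reuses the two ALC225 pin-0x14 variants from the table.
 */
struct pin_cfg { unsigned char nid; unsigned int cfg; };

static int pins_match(const struct pin_cfg *want, int n,
		      const struct pin_cfg *bios, int nbios)
{
	for (int i = 0; i < n; i++) {
		int found = 0;
		for (int j = 0; j < nbios; j++)
			if (bios[j].nid == want[i].nid &&
			    bios[j].cfg == want[i].cfg)
				found = 1;
		if (!found)
			return 0;
	}
	return 1;
}

int main(void)
{
	const struct pin_cfg bios[] = {
		{ 0x12, 0xb7a60130 }, { 0x21, 0x04211020 },
		{ 0x14, 0x901701a0 },
	};
	const struct pin_cfg variant_a[] = {
		{ 0x12, 0xb7a60130 }, { 0x21, 0x04211020 },
		{ 0x14, 0x901701a0 },
	};
	const struct pin_cfg variant_b[] = {
		{ 0x12, 0xb7a60130 }, { 0x21, 0x04211020 },
		{ 0x14, 0x901701b0 },
	};

	printf("variant a matches: %d\n", pins_match(variant_a, 3, bios, 3));
	printf("variant b matches: %d\n", pins_match(variant_b, 3, bios, 3));
	return 0;
}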
+@@ -5905,22 +6283,6 @@ static void alc_fixup_bass_chmap(struct hda_codec *codec,
+ }
+ }
+
+-/* turn on/off mute LED per vmaster hook */
+-static void alc662_led_gpio1_mute_hook(void *private_data, int enabled)
+-{
+- struct hda_codec *codec = private_data;
+- struct alc_spec *spec = codec->spec;
+- unsigned int oldval = spec->gpio_led;
+-
+- if (enabled)
+- spec->gpio_led |= 0x01;
+- else
+- spec->gpio_led &= ~0x01;
+- if (spec->gpio_led != oldval)
+- snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
+- spec->gpio_led);
+-}
+-
+ /* avoid D3 for keeping GPIO up */
+ static unsigned int gpio_led_power_filter(struct hda_codec *codec,
+ hda_nid_t nid,
+@@ -5943,8 +6305,10 @@ static void alc662_fixup_led_gpio1(struct hda_codec *codec,
+ };
+
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+- spec->gen.vmaster_mute.hook = alc662_led_gpio1_mute_hook;
++ spec->gen.vmaster_mute.hook = alc_fixup_gpio_mute_hook;
+ spec->gpio_led = 0;
++ spec->mute_led_polarity = 1;
++ spec->gpio_mute_led_mask = 0x01;
+ snd_hda_add_verbs(codec, gpio_init);
+ codec->power_filter = gpio_led_power_filter;
+ }
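The alc662 hunk above replaces a board-specific LED hook with the shared alc_fixup_gpio_mute_hook driven by gpio_mute_led_mask and mute_led_polarity. The sketch below shows the mask-and-polarity update such a generic hook plausibly performs; the names and the exact polarity handling are assumptions, chosen so that polarity 1 reproduces the removed hook (enabled sets bit 0x01):

#include <stdio.h>

/* Illustrative mock of a shared GPIO mute-LED hook: the board fixup only
 * fills in a bit mask and a polarity, and one common hook updates the
 * cached GPIO data and (in the real driver) writes AC_VERB_SET_GPIO_DATA
 * when it changes. Field names and polarity logic are assumptions.
 */
struct mock_spec {
	unsigned int gpio_led;            /* cached GPIO data bits          */
	unsigned int gpio_mute_led_mask;  /* 0x01 for the alc662 fixup      */
	unsigned int mute_led_polarity;   /* 1 here: bit follows "enabled"  */
};

static void gpio_mute_hook(struct mock_spec *spec, int enabled)
{
	unsigned int oldval = spec->gpio_led;

	if ((unsigned int)!!enabled == spec->mute_led_polarity)
		spec->gpio_led |= spec->gpio_mute_led_mask;
	else
		spec->gpio_led &= ~spec->gpio_mute_led_mask;

	if (spec->gpio_led != oldval)
		printf("write GPIO_DATA = 0x%02x\n", spec->gpio_led);
}

int main(void)
{
	struct mock_spec spec = { .gpio_mute_led_mask = 0x01,
				  .mute_led_polarity = 1 };

	gpio_mute_hook(&spec, 1); /* unmuted: bit set,   prints 0x01 */
	gpio_mute_hook(&spec, 0); /* muted:   bit clear, prints 0x00 */
	return 0;
}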
+diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
+index 7646ba1664eb..799fe9a81552 100644
+--- a/sound/pci/rme9652/hdsp.c
++++ b/sound/pci/rme9652/hdsp.c
+@@ -2927,7 +2927,7 @@ static int snd_hdsp_get_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl
+ {
+ struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
+
+- ucontrol->value.enumerated.item[0] = hdsp_dds_offset(hdsp);
++ ucontrol->value.integer.value[0] = hdsp_dds_offset(hdsp);
+ return 0;
+ }
+
+@@ -2939,7 +2939,7 @@ static int snd_hdsp_put_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl
+
+ if (!snd_hdsp_use_is_exclusive(hdsp))
+ return -EBUSY;
+- val = ucontrol->value.enumerated.item[0];
++ val = ucontrol->value.integer.value[0];
+ spin_lock_irq(&hdsp->lock);
+ if (val != hdsp_dds_offset(hdsp))
+ change = (hdsp_set_dds_offset(hdsp, val) == 0) ? 1 : 0;
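The hdsp.c change switches the DDS-offset control from value.enumerated.item[0] to value.integer.value[0]; those are different members of the control-value union with different types (roughly unsigned int versus long), so signed offsets only round-trip safely through the integer member. A simplified stand-in union, for illustration only:

#include <stdio.h>

/* Simplified stand-in for the ALSA control value union: integer controls
 * use long values, enumerated controls use unsigned int item indices.
 * Field types here mirror the real struct only approximately.
 */
union mock_ctl_value {
	struct { long value[2]; } integer;
	struct { unsigned int item[2]; } enumerated;
};

int main(void)
{
	union mock_ctl_value v = { 0 };
	long dds_offset = -42;              /* offsets may be negative */

	v.enumerated.item[0] = dds_offset;  /* wrong member: unsigned int */
	printf("via enumerated.item[0]: %u\n", v.enumerated.item[0]);

	v.integer.value[0] = dds_offset;    /* correct member: long */
	printf("via integer.value[0]:   %ld\n", v.integer.value[0]);
	return 0;
}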
+diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
+index fcf91ee0328d..120fb18dd92b 100644
+--- a/sound/pci/rme9652/hdspm.c
++++ b/sound/pci/rme9652/hdspm.c
+@@ -1602,6 +1602,9 @@ static void hdspm_set_dds_value(struct hdspm *hdspm, int rate)
+ {
+ u64 n;
+
++ if (snd_BUG_ON(rate <= 0))
++ return;
++
+ if (rate >= 112000)
+ rate /= 4;
+ else if (rate >= 56000)
+@@ -2220,6 +2223,8 @@ static int hdspm_get_system_sample_rate(struct hdspm *hdspm)
+ } else {
+ /* slave mode, return external sample rate */
+ rate = hdspm_external_sample_rate(hdspm);
++ if (!rate)
++ rate = hdspm->system_sample_rate;
+ }
+ }
+
+@@ -2265,8 +2270,11 @@ static int snd_hdspm_put_system_sample_rate(struct snd_kcontrol *kcontrol,
+ ucontrol)
+ {
+ struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
++ int rate = ucontrol->value.integer.value[0];
+
+- hdspm_set_dds_value(hdspm, ucontrol->value.enumerated.item[0]);
++ if (rate < 27000 || rate > 207000)
++ return -EINVAL;
++ hdspm_set_dds_value(hdspm, ucontrol->value.integer.value[0]);
+ return 0;
+ }
+
+@@ -4465,7 +4473,7 @@ static int snd_hdspm_get_tco_word_term(struct snd_kcontrol *kcontrol,
+ {
+ struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+
+- ucontrol->value.enumerated.item[0] = hdspm->tco->term;
++ ucontrol->value.integer.value[0] = hdspm->tco->term;
+
+ return 0;
+ }
+@@ -4476,8 +4484,8 @@ static int snd_hdspm_put_tco_word_term(struct snd_kcontrol *kcontrol,
+ {
+ struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+
+- if (hdspm->tco->term != ucontrol->value.enumerated.item[0]) {
+- hdspm->tco->term = ucontrol->value.enumerated.item[0];
++ if (hdspm->tco->term != ucontrol->value.integer.value[0]) {
++ hdspm->tco->term = ucontrol->value.integer.value[0];
+
+ hdspm_tco_write(hdspm);
+
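The hdspm hunks add two guards around DDS programming: rates outside 27000..207000 Hz are rejected with -EINVAL before hdspm_set_dds_value() runs, and the divide-down inside it is protected by snd_BUG_ON(rate <= 0). A small sketch of that validate-then-normalize logic, using the divisor thresholds visible in the patch:

#include <stdio.h>

/* Sketch of the rate handling the hdspm hunks guard: reject values outside
 * the 27 kHz..207 kHz window, then fold quad/double-speed rates down to the
 * base rate before programming the DDS.
 */
static int normalize_rate(int rate)
{
	if (rate < 27000 || rate > 207000)
		return -1;              /* -EINVAL in the driver */
	if (rate >= 112000)
		rate /= 4;              /* quad speed   */
	else if (rate >= 56000)
		rate /= 2;              /* double speed */
	return rate;
}

int main(void)
{
	int rates[] = { 44100, 96000, 192000, 0, 300000 };

	for (unsigned int i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("%6d -> %d\n", rates[i], normalize_rate(rates[i]));
	return 0;
}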
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 2ef679caa8b9..42c1d0171a5a 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1111,6 +1111,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+ case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
+ case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
+ case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
++ case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
+ case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
+ return true;
+ }
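The new quirk entry keys the Plantronics DA45 on a packed vendor/product ID. A tiny illustration of that packing, assuming the common vendor-high/product-low layout rather than quoting the real USB_ID macro:

#include <stdio.h>

/* The quirk switch keys devices on a packed vendor/product ID; the packing
 * assumed here (vendor in the high 16 bits, product in the low 16 bits) is
 * illustrative, check sound/usb/usbaudio.h for the real macro.
 */
#define MOCK_USB_ID(vendor, product) (((vendor) << 16) | (product))

int main(void)
{
	unsigned int id = MOCK_USB_ID(0x047F, 0xAA05); /* Plantronics DA45 */

	printf("key 0x%08X -> vendor 0x%04X product 0x%04X\n",
	       id, id >> 16, id & 0xFFFF);
	return 0;
}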
+diff --git a/tools/Makefile b/tools/Makefile
+index 9a617adc6675..f0fd70a5bd68 100644
+--- a/tools/Makefile
++++ b/tools/Makefile
+@@ -25,6 +25,10 @@ help:
+ @echo ' from the kernel command line to build and install one of'
+ @echo ' the tools above'
+ @echo ''
++ @echo ' $$ make tools/all'
++ @echo ''
++ @echo ' builds all tools.'
++ @echo ''
+ @echo ' $$ make tools/install'
+ @echo ''
+ @echo ' installs all tools.'
+@@ -62,6 +66,11 @@ turbostat x86_energy_perf_policy: FORCE
+ tmon: FORCE
+ $(call descend,thermal/$@)
+
++all: acpi cgroup cpupower hv firewire lguest \
++ perf selftests turbostat usb \
++ virtio vm net x86_energy_perf_policy \
++ tmon
++
+ acpi_install:
+ $(call descend,power/$(@:_install=),install)
+
+diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
+index d7cf2ffc56e9..68dd2df0d51b 100644
+--- a/virt/kvm/arm/vgic.c
++++ b/virt/kvm/arm/vgic.c
+@@ -1758,8 +1758,8 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
+ static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
+ {
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+-
+- int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
++ int nr_longs = BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);
++ int sz = nr_longs * sizeof(unsigned long);
+ vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
+ vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL);
+
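The vgic change fixes how the shared-IRQ pending bitmap is sized: dividing the bit count by 8 yields plain bytes, while the bitmap helpers always work on whole unsigned longs, so the old buffer could come up short. A worked example; the private-IRQ count of 32 is illustrative only, since VGIC_NR_PRIVATE_IRQS is not shown in this hunk:

#include <stdio.h>

/* Worked example of the vgic allocation fix: bitmaps are arrays of
 * unsigned long, so the buffer must hold BITS_TO_LONGS(n) words, not
 * just n/8 bytes.
 */
#define BITS_PER_LONG    (8 * (int)sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	int nr_irqs = 256, nr_private = 32;
	int shared = nr_irqs - nr_private;

	int old_sz = shared / 8;
	int new_sz = BITS_TO_LONGS(shared) * (int)sizeof(unsigned long);

	printf("shared IRQs: %d\n", shared);
	printf("old size: %d bytes, new size: %d bytes\n", old_sz, new_sz);
	/* On a 64-bit build: 224 bits -> 4 longs -> 32 bytes, so the old
	 * 28-byte buffer is 4 bytes short of what bitmap ops may touch.
	 */
	return 0;
}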
+diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
+index 5ff7f7f2689a..e06785470408 100644
+--- a/virt/kvm/async_pf.c
++++ b/virt/kvm/async_pf.c
+@@ -169,7 +169,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
+ * do alloc nowait since if we are going to sleep anyway we
+ * may as well sleep faulting in page
+ */
+- work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT);
++ work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
+ if (!work)
+ return 0;
+