author     Mike Pagano <mpagano@gentoo.org>  2016-03-05 16:07:59 -0500
committer  Mike Pagano <mpagano@gentoo.org>  2016-03-05 16:07:59 -0500
commit     f4b15f5e246692e7b269d9274d09ec521d49f4b1 (patch)
tree       0c92ce34f9a8a92d69ce677be52c82865c4deb0b
parent     Linux patch 3.18.27 (diff)
download   linux-patches-f4b15f5e246692e7b269d9274d09ec521d49f4b1.tar.gz
           linux-patches-f4b15f5e246692e7b269d9274d09ec521d49f4b1.tar.bz2
           linux-patches-f4b15f5e246692e7b269d9274d09ec521d49f4b1.zip

Linux patch 3.18.28 (tag: 3.18-29)

-rw-r--r--  0000_README               |    4
-rw-r--r--  1027_linux-3.18.28.patch  | 4935
2 files changed, 4939 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 0da11e35..124258e8 100644
--- a/0000_README
+++ b/0000_README
@@ -151,6 +151,10 @@ Patch: 1026_linux-3.18.27.patch
From: http://www.kernel.org
Desc: Linux 3.18.27
+Patch: 1027_linux-3.18.28.patch
+From: http://www.kernel.org
+Desc: Linux 3.18.28
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1027_linux-3.18.28.patch b/1027_linux-3.18.28.patch
new file mode 100644
index 00000000..8597d310
--- /dev/null
+++ b/1027_linux-3.18.28.patch
@@ -0,0 +1,4935 @@
+diff --git a/Makefile b/Makefile
+index 2393cc5e0229..f849f29ce405 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 18
+-SUBLEVEL = 27
++SUBLEVEL = 28
+ EXTRAVERSION =
+ NAME = Diseased Newt
+
+diff --git a/arch/arm/common/icst.c b/arch/arm/common/icst.c
+index 2dc6da70ae59..d7ed252708c5 100644
+--- a/arch/arm/common/icst.c
++++ b/arch/arm/common/icst.c
+@@ -16,7 +16,7 @@
+ */
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+-
++#include <asm/div64.h>
+ #include <asm/hardware/icst.h>
+
+ /*
+@@ -29,7 +29,11 @@ EXPORT_SYMBOL(icst525_s2div);
+
+ unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco)
+ {
+- return p->ref * 2 * (vco.v + 8) / ((vco.r + 2) * p->s2div[vco.s]);
++ u64 dividend = p->ref * 2 * (u64)(vco.v + 8);
++ u32 divisor = (vco.r + 2) * p->s2div[vco.s];
++
++ do_div(dividend, divisor);
++ return (unsigned long)dividend;
+ }
+
+ EXPORT_SYMBOL(icst_hz);
+@@ -58,6 +62,7 @@ icst_hz_to_vco(const struct icst_params *p, unsigned long freq)
+
+ if (f > p->vco_min && f <= p->vco_max)
+ break;
++ i++;
+ } while (i < 8);
+
+ if (i >= 8)
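
The icst.c hunk above fixes two things: the VCO frequency product p->ref * 2 * (vco.v + 8) can overflow a 32-bit unsigned long for fast reference clocks, so it is now computed in 64 bits and divided with do_div() (which divides a u64 by a u32 in place, avoiding a 64-by-64 libgcc division helper on ARM); and the do/while loop gained the i++ it needs, since without it the loop could never reach its i < 8 bound and would spin forever when no suitable divisor exists. A minimal userspace sketch of the overflow, with illustrative values not taken from the patch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t ref = 24000000;    /* 24 MHz reference clock, illustrative */
        uint32_t v   = 200;         /* VCO multiplier field, illustrative */

        uint32_t narrow = ref * 2 * (v + 8);            /* wraps modulo 2^32 */
        uint64_t wide   = (uint64_t)ref * 2 * (v + 8);  /* exact product */

        printf("32-bit product: %u\n", narrow);                     /* 1394065408 */
        printf("64-bit product: %llu\n", (unsigned long long)wide); /* 9984000000 */
        return 0;
    }
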
+diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
+index bb7963753730..b81d3bafbcc2 100644
+--- a/arch/mips/include/asm/syscall.h
++++ b/arch/mips/include/asm/syscall.h
+@@ -107,10 +107,8 @@ static inline void syscall_get_arguments(struct task_struct *task,
+ /* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
+ if ((config_enabled(CONFIG_32BIT) ||
+ test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
+- (regs->regs[2] == __NR_syscall)) {
++ (regs->regs[2] == __NR_syscall))
+ i++;
+- n++;
+- }
+
+ while (n--)
+ ret |= mips_get_syscall_arg(args++, task, regs, i++);
+diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
+index 1d19e7917d7f..794815ff139c 100644
+--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
++++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
+@@ -39,11 +39,11 @@
+ #include "pci.h"
+
+ /**
+- * powernv_eeh_init - EEH platform dependent initialization
++ * pnv_eeh_init - EEH platform dependent initialization
+ *
+ * EEH platform dependent initialization on powernv
+ */
+-static int powernv_eeh_init(void)
++static int pnv_eeh_init(void)
+ {
+ struct pci_controller *hose;
+ struct pnv_phb *phb;
+@@ -75,14 +75,14 @@ static int powernv_eeh_init(void)
+ }
+
+ /**
+- * powernv_eeh_post_init - EEH platform dependent post initialization
++ * pnv_eeh_post_init - EEH platform dependent post initialization
+ *
+ * EEH platform dependent post initialization on powernv. When
+ * the function is called, the EEH PEs and devices should have
+ * been built. If the I/O cache staff has been built, EEH is
+ * ready to supply service.
+ */
+-static int powernv_eeh_post_init(void)
++static int pnv_eeh_post_init(void)
+ {
+ struct pci_controller *hose;
+ struct pnv_phb *phb;
+@@ -102,7 +102,7 @@ static int powernv_eeh_post_init(void)
+ }
+
+ /**
+- * powernv_eeh_dev_probe - Do probe on PCI device
++ * pnv_eeh_dev_probe - Do probe on PCI device
+ * @dev: PCI device
+ * @flag: unused
+ *
+@@ -118,7 +118,7 @@ static int powernv_eeh_post_init(void)
+ * was possiblly triggered by EEH core, the binding between EEH device
+ * and the PCI device isn't built yet.
+ */
+-static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
++static int pnv_eeh_dev_probe(struct pci_dev *dev, void *flag)
+ {
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+@@ -210,7 +210,7 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
+ }
+
+ /**
+- * powernv_eeh_set_option - Initialize EEH or MMIO/DMA reenable
++ * pnv_eeh_set_option - Initialize EEH or MMIO/DMA reenable
+ * @pe: EEH PE
+ * @option: operation to be issued
+ *
+@@ -218,7 +218,7 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
+ * Currently, following options are support according to PAPR:
+ * Enable EEH, Disable EEH, Enable MMIO and Enable DMA
+ */
+-static int powernv_eeh_set_option(struct eeh_pe *pe, int option)
++static int pnv_eeh_set_option(struct eeh_pe *pe, int option)
+ {
+ struct pci_controller *hose = pe->phb;
+ struct pnv_phb *phb = hose->private_data;
+@@ -235,19 +235,19 @@ static int powernv_eeh_set_option(struct eeh_pe *pe, int option)
+ }
+
+ /**
+- * powernv_eeh_get_pe_addr - Retrieve PE address
++ * pnv_eeh_get_pe_addr - Retrieve PE address
+ * @pe: EEH PE
+ *
+ * Retrieve the PE address according to the given tranditional
+ * PCI BDF (Bus/Device/Function) address.
+ */
+-static int powernv_eeh_get_pe_addr(struct eeh_pe *pe)
++static int pnv_eeh_get_pe_addr(struct eeh_pe *pe)
+ {
+ return pe->addr;
+ }
+
+ /**
+- * powernv_eeh_get_state - Retrieve PE state
++ * pnv_eeh_get_state - Retrieve PE state
+ * @pe: EEH PE
+ * @delay: delay while PE state is temporarily unavailable
+ *
+@@ -256,7 +256,7 @@ static int powernv_eeh_get_pe_addr(struct eeh_pe *pe)
+ * we prefer passing down to hardware implementation to handle
+ * it.
+ */
+-static int powernv_eeh_get_state(struct eeh_pe *pe, int *delay)
++static int pnv_eeh_get_state(struct eeh_pe *pe, int *delay)
+ {
+ struct pci_controller *hose = pe->phb;
+ struct pnv_phb *phb = hose->private_data;
+@@ -281,13 +281,13 @@ static int powernv_eeh_get_state(struct eeh_pe *pe, int *delay)
+ }
+
+ /**
+- * powernv_eeh_reset - Reset the specified PE
++ * pnv_eeh_reset - Reset the specified PE
+ * @pe: EEH PE
+ * @option: reset option
+ *
+ * Reset the specified PE
+ */
+-static int powernv_eeh_reset(struct eeh_pe *pe, int option)
++static int pnv_eeh_reset(struct eeh_pe *pe, int option)
+ {
+ struct pci_controller *hose = pe->phb;
+ struct pnv_phb *phb = hose->private_data;
+@@ -300,20 +300,20 @@ static int powernv_eeh_reset(struct eeh_pe *pe, int option)
+ }
+
+ /**
+- * powernv_eeh_wait_state - Wait for PE state
++ * pnv_eeh_wait_state - Wait for PE state
+ * @pe: EEH PE
+ * @max_wait: maximal period in microsecond
+ *
+ * Wait for the state of associated PE. It might take some time
+ * to retrieve the PE's state.
+ */
+-static int powernv_eeh_wait_state(struct eeh_pe *pe, int max_wait)
++static int pnv_eeh_wait_state(struct eeh_pe *pe, int max_wait)
+ {
+ int ret;
+ int mwait;
+
+ while (1) {
+- ret = powernv_eeh_get_state(pe, &mwait);
++ ret = pnv_eeh_get_state(pe, &mwait);
+
+ /*
+ * If the PE's state is temporarily unavailable,
+@@ -337,7 +337,7 @@ static int powernv_eeh_wait_state(struct eeh_pe *pe, int max_wait)
+ }
+
+ /**
+- * powernv_eeh_get_log - Retrieve error log
++ * pnv_eeh_get_log - Retrieve error log
+ * @pe: EEH PE
+ * @severity: temporary or permanent error log
+ * @drv_log: driver log to be combined with retrieved error log
+@@ -345,8 +345,8 @@ static int powernv_eeh_wait_state(struct eeh_pe *pe, int max_wait)
+ *
+ * Retrieve the temporary or permanent error from the PE.
+ */
+-static int powernv_eeh_get_log(struct eeh_pe *pe, int severity,
+- char *drv_log, unsigned long len)
++static int pnv_eeh_get_log(struct eeh_pe *pe, int severity,
++ char *drv_log, unsigned long len)
+ {
+ struct pci_controller *hose = pe->phb;
+ struct pnv_phb *phb = hose->private_data;
+@@ -359,14 +359,14 @@ static int powernv_eeh_get_log(struct eeh_pe *pe, int severity,
+ }
+
+ /**
+- * powernv_eeh_configure_bridge - Configure PCI bridges in the indicated PE
++ * pnv_eeh_configure_bridge - Configure PCI bridges in the indicated PE
+ * @pe: EEH PE
+ *
+ * The function will be called to reconfigure the bridges included
+ * in the specified PE so that the mulfunctional PE would be recovered
+ * again.
+ */
+-static int powernv_eeh_configure_bridge(struct eeh_pe *pe)
++static int pnv_eeh_configure_bridge(struct eeh_pe *pe)
+ {
+ struct pci_controller *hose = pe->phb;
+ struct pnv_phb *phb = hose->private_data;
+@@ -379,7 +379,7 @@ static int powernv_eeh_configure_bridge(struct eeh_pe *pe)
+ }
+
+ /**
+- * powernv_pe_err_inject - Inject specified error to the indicated PE
++ * pnv_pe_err_inject - Inject specified error to the indicated PE
+ * @pe: the indicated PE
+ * @type: error type
+ * @func: specific error type
+@@ -390,8 +390,8 @@ static int powernv_eeh_configure_bridge(struct eeh_pe *pe)
+ * determined by @type and @func, to the indicated PE for
+ * testing purpose.
+ */
+-static int powernv_eeh_err_inject(struct eeh_pe *pe, int type, int func,
+- unsigned long addr, unsigned long mask)
++static int pnv_eeh_err_inject(struct eeh_pe *pe, int type, int func,
++ unsigned long addr, unsigned long mask)
+ {
+ struct pci_controller *hose = pe->phb;
+ struct pnv_phb *phb = hose->private_data;
+@@ -403,7 +403,7 @@ static int powernv_eeh_err_inject(struct eeh_pe *pe, int type, int func,
+ return ret;
+ }
+
+-static inline bool powernv_eeh_cfg_blocked(struct device_node *dn)
++static inline bool pnv_eeh_cfg_blocked(struct device_node *dn)
+ {
+ struct eeh_dev *edev = of_node_to_eeh_dev(dn);
+
+@@ -416,10 +416,10 @@ static inline bool powernv_eeh_cfg_blocked(struct device_node *dn)
+ return false;
+ }
+
+-static int powernv_eeh_read_config(struct device_node *dn,
+- int where, int size, u32 *val)
++static int pnv_eeh_read_config(struct device_node *dn,
++ int where, int size, u32 *val)
+ {
+- if (powernv_eeh_cfg_blocked(dn)) {
++ if (pnv_eeh_cfg_blocked(dn)) {
+ *val = 0xFFFFFFFF;
+ return PCIBIOS_SET_FAILED;
+ }
+@@ -427,22 +427,22 @@ static int powernv_eeh_read_config(struct device_node *dn,
+ return pnv_pci_cfg_read(dn, where, size, val);
+ }
+
+-static int powernv_eeh_write_config(struct device_node *dn,
+- int where, int size, u32 val)
++static int pnv_eeh_write_config(struct device_node *dn,
++ int where, int size, u32 val)
+ {
+- if (powernv_eeh_cfg_blocked(dn))
++ if (pnv_eeh_cfg_blocked(dn))
+ return PCIBIOS_SET_FAILED;
+
+ return pnv_pci_cfg_write(dn, where, size, val);
+ }
+
+ /**
+- * powernv_eeh_next_error - Retrieve next EEH error to handle
++ * pnv_eeh_next_error - Retrieve next EEH error to handle
+ * @pe: Affected PE
+ *
+ * Using OPAL API, to retrieve next EEH error for EEH core to handle
+ */
+-static int powernv_eeh_next_error(struct eeh_pe **pe)
++static int pnv_eeh_next_error(struct eeh_pe **pe)
+ {
+ struct pci_controller *hose;
+ struct pnv_phb *phb = NULL;
+@@ -458,7 +458,7 @@ static int powernv_eeh_next_error(struct eeh_pe **pe)
+ return -EEXIST;
+ }
+
+-static int powernv_eeh_restore_config(struct device_node *dn)
++static int pnv_eeh_restore_config(struct device_node *dn)
+ {
+ struct eeh_dev *edev = of_node_to_eeh_dev(dn);
+ struct pnv_phb *phb;
+@@ -479,24 +479,24 @@ static int powernv_eeh_restore_config(struct device_node *dn)
+ return 0;
+ }
+
+-static struct eeh_ops powernv_eeh_ops = {
++static struct eeh_ops pnv_eeh_ops = {
+ .name = "powernv",
+- .init = powernv_eeh_init,
+- .post_init = powernv_eeh_post_init,
++ .init = pnv_eeh_init,
++ .post_init = pnv_eeh_post_init,
+ .of_probe = NULL,
+- .dev_probe = powernv_eeh_dev_probe,
+- .set_option = powernv_eeh_set_option,
+- .get_pe_addr = powernv_eeh_get_pe_addr,
+- .get_state = powernv_eeh_get_state,
+- .reset = powernv_eeh_reset,
+- .wait_state = powernv_eeh_wait_state,
+- .get_log = powernv_eeh_get_log,
+- .configure_bridge = powernv_eeh_configure_bridge,
+- .err_inject = powernv_eeh_err_inject,
+- .read_config = powernv_eeh_read_config,
+- .write_config = powernv_eeh_write_config,
+- .next_error = powernv_eeh_next_error,
+- .restore_config = powernv_eeh_restore_config
++ .dev_probe = pnv_eeh_dev_probe,
++ .set_option = pnv_eeh_set_option,
++ .get_pe_addr = pnv_eeh_get_pe_addr,
++ .get_state = pnv_eeh_get_state,
++ .reset = pnv_eeh_reset,
++ .wait_state = pnv_eeh_wait_state,
++ .get_log = pnv_eeh_get_log,
++ .configure_bridge = pnv_eeh_configure_bridge,
++ .err_inject = pnv_eeh_err_inject,
++ .read_config = pnv_eeh_read_config,
++ .write_config = pnv_eeh_write_config,
++ .next_error = pnv_eeh_next_error,
++ .restore_config = pnv_eeh_restore_config
+ };
+
+ /**
+@@ -510,7 +510,7 @@ static int __init eeh_powernv_init(void)
+ int ret = -EINVAL;
+
+ eeh_set_pe_aux_size(PNV_PCI_DIAG_BUF_SIZE);
+- ret = eeh_ops_register(&powernv_eeh_ops);
++ ret = eeh_ops_register(&pnv_eeh_ops);
+ if (!ret)
+ pr_info("EEH: PowerNV platform initialized\n");
+ else
+diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
+index c90af2537d24..c571d85cfad7 100644
+--- a/crypto/crypto_user.c
++++ b/crypto/crypto_user.c
+@@ -483,6 +483,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ if (link->dump == NULL)
+ return -EINVAL;
+
++ down_read(&crypto_alg_sem);
+ list_for_each_entry(alg, &crypto_alg_list, cra_list)
+ dump_alloc += CRYPTO_REPORT_MAXSIZE;
+
+@@ -492,8 +493,11 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ .done = link->done,
+ .min_dump_alloc = dump_alloc,
+ };
+- return netlink_dump_start(crypto_nlsk, skb, nlh, &c);
++ err = netlink_dump_start(crypto_nlsk, skb, nlh, &c);
+ }
++ up_read(&crypto_alg_sem);
++
++ return err;
+ }
+
+ err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX,
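
The crypto_user.c hunk above closes a race: the walk of crypto_alg_list that sizes the netlink dump buffer now runs under a read lock on crypto_alg_sem, so an algorithm being registered or unregistered concurrently cannot mutate the list mid-iteration, and netlink_dump_start()'s result is carried through err so the lock is always released on the way out. A minimal kernel-style sketch of the reader side of this pattern; the names are illustrative, not from the patch:

    #include <linux/list.h>
    #include <linux/rwsem.h>

    static DECLARE_RWSEM(example_sem);
    static LIST_HEAD(example_list);

    struct example_entry {
        struct list_head node;
        size_t size;
    };

    static size_t example_total_size(void)
    {
        struct example_entry *e;
        size_t total = 0;

        down_read(&example_sem);        /* excludes writers for the whole walk */
        list_for_each_entry(e, &example_list, node)
            total += e->size;
        up_read(&example_sem);

        return total;
    }
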
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 73187771836c..62dcd80ec2c0 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -262,6 +262,26 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
+ { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
+ { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
++ { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
+ { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
+ { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index de88999521b7..a1d1c0e16697 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -495,8 +495,8 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
+ }
+ }
+
+- /* fabricate port_map from cap.nr_ports */
+- if (!port_map) {
++ /* fabricate port_map from cap.nr_ports for < AHCI 1.3 */
++ if (!port_map && vers < 0x10300) {
+ port_map = (1 << ahci_nr_ports(cap)) - 1;
+ dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map);
+
+diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
+index 2e86e3b85266..12d337754e4a 100644
+--- a/drivers/ata/libata-sff.c
++++ b/drivers/ata/libata-sff.c
+@@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
+ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
+ {
+ struct ata_port *ap = qc->ap;
+- unsigned long flags;
+
+ if (ap->ops->error_handler) {
+ if (in_wq) {
+- spin_lock_irqsave(ap->lock, flags);
+-
+ /* EH might have kicked in while host lock is
+ * released.
+ */
+@@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
+ } else
+ ata_port_freeze(ap);
+ }
+-
+- spin_unlock_irqrestore(ap->lock, flags);
+ } else {
+ if (likely(!(qc->err_mask & AC_ERR_HSM)))
+ ata_qc_complete(qc);
+@@ -1024,10 +1019,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
+ }
+ } else {
+ if (in_wq) {
+- spin_lock_irqsave(ap->lock, flags);
+ ata_sff_irq_on(ap);
+ ata_qc_complete(qc);
+- spin_unlock_irqrestore(ap->lock, flags);
+ } else
+ ata_qc_complete(qc);
+ }
+@@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
+ {
+ struct ata_link *link = qc->dev->link;
+ struct ata_eh_info *ehi = &link->eh_info;
+- unsigned long flags = 0;
+ int poll_next;
+
++ lockdep_assert_held(ap->lock);
++
+ WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
+
+ /* Make sure ata_sff_qc_issue() does not throw things
+@@ -1112,14 +1106,6 @@ fsm_start:
+ }
+ }
+
+- /* Send the CDB (atapi) or the first data block (ata pio out).
+- * During the state transition, interrupt handler shouldn't
+- * be invoked before the data transfer is complete and
+- * hsm_task_state is changed. Hence, the following locking.
+- */
+- if (in_wq)
+- spin_lock_irqsave(ap->lock, flags);
+-
+ if (qc->tf.protocol == ATA_PROT_PIO) {
+ /* PIO data out protocol.
+ * send first data block.
+@@ -1135,9 +1121,6 @@ fsm_start:
+ /* send CDB */
+ atapi_send_cdb(ap, qc);
+
+- if (in_wq)
+- spin_unlock_irqrestore(ap->lock, flags);
+-
+ /* if polling, ata_sff_pio_task() handles the rest.
+ * otherwise, interrupt handler takes over from here.
+ */
+@@ -1361,12 +1344,14 @@ static void ata_sff_pio_task(struct work_struct *work)
+ u8 status;
+ int poll_next;
+
++ spin_lock_irq(ap->lock);
++
+ BUG_ON(ap->sff_pio_task_link == NULL);
+ /* qc can be NULL if timeout occurred */
+ qc = ata_qc_from_tag(ap, link->active_tag);
+ if (!qc) {
+ ap->sff_pio_task_link = NULL;
+- return;
++ goto out_unlock;
+ }
+
+ fsm_start:
+@@ -1381,11 +1366,14 @@ fsm_start:
+ */
+ status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
+ if (status & ATA_BUSY) {
++ spin_unlock_irq(ap->lock);
+ ata_msleep(ap, 2);
++ spin_lock_irq(ap->lock);
++
+ status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
+ if (status & ATA_BUSY) {
+ ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
+- return;
++ goto out_unlock;
+ }
+ }
+
+@@ -1402,6 +1390,8 @@ fsm_start:
+ */
+ if (poll_next)
+ goto fsm_start;
++out_unlock:
++ spin_unlock_irq(ap->lock);
+ }
+
+ /**
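
The libata-sff hunks above consolidate locking: instead of ata_sff_hsm_move() and ata_hsm_qc_complete() taking and dropping ap->lock piecemeal (leaving windows the interrupt handler could slip through), ata_sff_pio_task() now holds the lock across the whole state-machine step, releasing it only around the ata_msleep() call, and the new contract is enforced with lockdep_assert_held(ap->lock). A minimal kernel-style sketch of that contract, with illustrative names:

    #include <linux/lockdep.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(hsm_lock);   /* illustrative stand-in for ap->lock */

    static void hsm_step(void)
    {
        /* Documents and (under CONFIG_LOCKDEP) enforces the caller's duty. */
        lockdep_assert_held(&hsm_lock);
        /* ... advance the state machine ... */
    }

    static void hsm_task(void)
    {
        spin_lock_irq(&hsm_lock);       /* one lock hold spans the whole step */
        hsm_step();
        spin_unlock_irq(&hsm_lock);
    }
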
+diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
+index ec233a5888e8..d95e1d0fcc18 100644
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -148,7 +148,6 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
+
+ /* Enable interrupts */
+ channel_set_bit(dw, MASK.XFER, dwc->mask);
+- channel_set_bit(dw, MASK.BLOCK, dwc->mask);
+ channel_set_bit(dw, MASK.ERROR, dwc->mask);
+
+ dwc->initialized = true;
+@@ -580,6 +579,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ }
++
++ /* Re-enable interrupts */
++ channel_set_bit(dw, MASK.BLOCK, dwc->mask);
+ }
+
+ /* ------------------------------------------------------------------------- */
+@@ -610,11 +612,8 @@ static void dw_dma_tasklet(unsigned long data)
+ dwc_scan_descriptors(dw, dwc);
+ }
+
+- /*
+- * Re-enable interrupts.
+- */
++ /* Re-enable interrupts */
+ channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
+- channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
+ channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
+ }
+
+@@ -1249,6 +1248,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
+ int dw_dma_cyclic_start(struct dma_chan *chan)
+ {
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
++ struct dw_dma *dw = to_dw_dma(chan->device);
+ unsigned long flags;
+
+ if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
+@@ -1257,7 +1257,12 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
+ }
+
+ spin_lock_irqsave(&dwc->lock, flags);
++
++ /* Enable interrupts to perform cyclic transfer */
++ channel_set_bit(dw, MASK.BLOCK, dwc->mask);
++
+ dwc_dostart(dwc, dwc->cdesc->desc[0]);
++
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return 0;
+diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+index f6bdd44069ce..6ea603ae9055 100644
+--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
++++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+@@ -171,7 +171,12 @@ static u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, u8 *data)
+ gpio = *data++;
+
+ /* pull up/down */
+- action = *data++;
++ action = *data++ & 1;
++
++ if (gpio >= ARRAY_SIZE(gtable)) {
++ DRM_DEBUG_KMS("unknown gpio %u\n", gpio);
++ goto out;
++ }
+
+ function = gtable[gpio].function_reg;
+ pad = gtable[gpio].pad_reg;
+@@ -190,6 +195,7 @@ static u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, u8 *data)
+ vlv_gpio_nc_write(dev_priv, pad, val);
+ mutex_unlock(&dev_priv->dpio_lock);
+
++out:
+ return data;
+ }
+
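
The i915 hunk above hardens mipi_exec_gpio() against malformed VBT data: the action byte is masked down to its single meaningful bit, and the gpio index is checked against ARRAY_SIZE(gtable) before it is used as an array subscript, so an out-of-range value is logged and skipped instead of read out of bounds. The same idiom in a runnable userspace sketch, with illustrative data:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))   /* kernel-style macro */

    static const int table[] = { 10, 20, 30 };

    /* Validate an externally supplied index before using it. */
    static int lookup(unsigned int idx)
    {
        if (idx >= ARRAY_SIZE(table)) {
            fprintf(stderr, "unknown index %u\n", idx);
            return -1;
        }
        return table[idx];
    }

    int main(void)
    {
        printf("%d %d\n", lookup(1), lookup(7));  /* 20, then rejected as -1 */
        return 0;
    }
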
+diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
+index 7354a4cda59d..3aefaa058f0c 100644
+--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
++++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
+@@ -168,7 +168,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
+ cmd->command_size))
+ return -EFAULT;
+
+- reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL);
++ reloc_info = kmalloc_array(cmd->relocs_num,
++ sizeof(struct qxl_reloc_info), GFP_KERNEL);
+ if (!reloc_info)
+ return -ENOMEM;
+
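
The qxl hunk above swaps an open-coded size multiplication for kmalloc_array(), which fails the allocation when count * size would overflow instead of silently returning a short buffer for the ioctl's user-controlled relocs_num. A userspace sketch of the wrap the open-coded form allows, using illustrative 32-bit arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    struct reloc { uint64_t src, dst; };    /* 16 bytes, illustrative */

    int main(void)
    {
        /* Attacker-chosen count: count * sizeof wraps a 32-bit size. */
        uint32_t count = 0x10000001;
        uint32_t bytes = count * (uint32_t)sizeof(struct reloc);

        printf("requested %u elements -> %u bytes after 32-bit wrap\n",
               count, bytes);               /* 16 bytes: a short allocation */
        return 0;
    }
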
+diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
+index c507896aca45..197b157b73d0 100644
+--- a/drivers/gpu/drm/radeon/radeon_sa.c
++++ b/drivers/gpu/drm/radeon/radeon_sa.c
+@@ -349,8 +349,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
+ /* see if we can skip over some allocations */
+ } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
+
++ for (i = 0; i < RADEON_NUM_RINGS; ++i)
++ radeon_fence_ref(fences[i]);
++
+ spin_unlock(&sa_manager->wq.lock);
+ r = radeon_fence_wait_any(rdev, fences, false);
++ for (i = 0; i < RADEON_NUM_RINGS; ++i)
++ radeon_fence_unref(&fences[i]);
+ spin_lock(&sa_manager->wq.lock);
+ /* if we have nothing to wait for block */
+ if (r == -ENOENT) {
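
The radeon_sa.c hunk above pins each fence with radeon_fence_ref() before dropping sa_manager->wq.lock to sleep in radeon_fence_wait_any(), then drops the references afterwards; without the pin, another thread could free a fence out from under the waiter once the lock is released. A minimal kernel-style sketch of the pin-before-unlock pattern, with illustrative names:

    #include <linux/kref.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct obj {
        struct kref ref;
    };

    static void obj_release(struct kref *r)
    {
        kfree(container_of(r, struct obj, ref));
    }

    /* Called with *lock held; sleeps with it dropped. */
    static void wait_on_obj_locked(struct obj *o, spinlock_t *lock)
    {
        kref_get(&o->ref);              /* pin: o can't be freed while we sleep */
        spin_unlock(lock);

        /* ... block on o here, lock not held ... */

        spin_lock(lock);
        kref_put(&o->ref, obj_release); /* unpin */
    }
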
+diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
+index 13f69472e716..33928b71445b 100644
+--- a/drivers/gpu/drm/radeon/radeon_ttm.c
++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
+@@ -735,7 +735,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
+ 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
+- while (--i) {
++ while (i--) {
+ pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ gtt->ttm.dma_address[i] = 0;
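
The radeon_ttm.c hunk above fixes the unmap-on-error unwind: while (--i) stops before index 0 (leaking the first mapped page) and, if the failure happened at i == 0, decrements past zero and runs with a bogus index, whereas while (i--) visits exactly indices i-1 down to 0. A runnable comparison:

    #include <stdio.h>

    int main(void)
    {
        int i = 3;   /* pages mapped before the failure */

        /* Buggy unwind: pre-decrement stops before index 0. */
        int j = i;
        while (--j)
            printf("buggy cleans index %d\n", j);    /* 2, 1 - index 0 leaked */

        /* Fixed unwind: post-decrement visits 2, 1, 0. */
        j = i;
        while (j--)
            printf("fixed cleans index %d\n", j);    /* 2, 1, 0 */
        return 0;
    }
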
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index 357206a20017..041ade6ea565 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -1334,7 +1334,7 @@ sequence_cmd:
+ if (!rc && dump_payload == false && unsol_data)
+ iscsit_set_unsoliticed_dataout(cmd);
+ else if (dump_payload && imm_data)
+- target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
++ target_put_sess_cmd(&cmd->se_cmd);
+
+ return 0;
+ }
+@@ -1753,7 +1753,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
+ cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+
+- target_put_sess_cmd(se_cmd->se_sess, se_cmd);
++ target_put_sess_cmd(se_cmd);
+ }
+ }
+
+@@ -1922,7 +1922,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
+ spin_unlock_bh(&cmd->istate_lock);
+
+ if (ret) {
+- target_put_sess_cmd(se_cmd->se_sess, se_cmd);
++ target_put_sess_cmd(se_cmd);
+ transport_send_check_condition_and_sense(se_cmd,
+ se_cmd->pi_err, 0);
+ } else {
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index dc829682701a..ad4af66a4cbb 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -1335,7 +1335,7 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
+
+ BUG_ON(ch->sess == NULL);
+
+- target_put_sess_cmd(ch->sess, &ioctx->cmd);
++ target_put_sess_cmd(&ioctx->cmd);
+ goto out;
+ }
+
+@@ -1366,11 +1366,11 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
+ * not been received in time.
+ */
+ srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
+- target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
++ target_put_sess_cmd(&ioctx->cmd);
+ break;
+ case SRPT_STATE_MGMT_RSP_SENT:
+ srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
+- target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
++ target_put_sess_cmd(&ioctx->cmd);
+ break;
+ default:
+ WARN(1, "Unexpected command state (%d)", state);
+@@ -1682,7 +1682,7 @@ static int srpt_check_stop_free(struct se_cmd *cmd)
+ struct srpt_send_ioctx *ioctx = container_of(cmd,
+ struct srpt_send_ioctx, cmd);
+
+- return target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
++ return target_put_sess_cmd(&ioctx->cmd);
+ }
+
+ /**
+@@ -3079,7 +3079,7 @@ static void srpt_queue_response(struct se_cmd *cmd)
+ ioctx->tag);
+ srpt_unmap_sg_to_ib_sge(ch, ioctx);
+ srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
+- target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
++ target_put_sess_cmd(&ioctx->cmd);
+ }
+ }
+
+diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
+index c5c61cabd6e3..8a0643ae1fd9 100644
+--- a/drivers/iommu/dmar.c
++++ b/drivers/iommu/dmar.c
+@@ -1272,7 +1272,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
+
+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
+
+- sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
++ sts = readl(iommu->reg + DMAR_GSTS_REG);
+ if (!(sts & DMA_GSTS_QIES))
+ goto end;
+
+diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
+index 7c80661b35c1..f2ed4ee201d6 100644
+--- a/drivers/iommu/intel_irq_remapping.c
++++ b/drivers/iommu/intel_irq_remapping.c
+@@ -531,7 +531,7 @@ static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
+
+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
+
+- sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
++ sts = readl(iommu->reg + DMAR_GSTS_REG);
+ if (!(sts & DMA_GSTS_IRES))
+ goto end;
+
+diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
+index ee932004724f..e15b4845f7c6 100644
+--- a/drivers/parport/parport_serial.c
++++ b/drivers/parport/parport_serial.c
+@@ -64,6 +64,7 @@ enum parport_pc_pci_cards {
+ timedia_9079c,
+ wch_ch353_1s1p,
+ wch_ch353_2s1p,
++ wch_ch382_2s1p,
+ sunix_2s1p,
+ };
+
+@@ -151,6 +152,7 @@ static struct parport_pc_pci cards[] = {
+ /* timedia_9079c */ { 1, { { 2, 3 }, } },
+ /* wch_ch353_1s1p*/ { 1, { { 1, -1}, } },
+ /* wch_ch353_2s1p*/ { 1, { { 2, -1}, } },
++ /* wch_ch382_2s1p*/ { 1, { { 2, -1}, } },
+ /* sunix_2s1p */ { 1, { { 3, -1 }, } },
+ };
+
+@@ -257,6 +259,7 @@ static struct pci_device_id parport_serial_pci_tbl[] = {
+ /* WCH CARDS */
+ { 0x4348, 0x5053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, wch_ch353_1s1p},
+ { 0x4348, 0x7053, 0x4348, 0x3253, 0, 0, wch_ch353_2s1p},
++ { 0x1c00, 0x3250, 0x1c00, 0x3250, 0, 0, wch_ch382_2s1p},
+
+ /*
+ * More SUNIX variations. At least one of these has part number
+@@ -494,6 +497,13 @@ static struct pciserial_board pci_parport_serial_boards[] = {
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
++ [wch_ch382_2s1p] = {
++ .flags = FL_BASE0,
++ .num_ports = 2,
++ .base_baud = 115200,
++ .uart_offset = 8,
++ .first_offset = 0xC0,
++ },
+ [sunix_2s1p] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 2,
+diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
+index 0bf82a20a0fb..48d21e0edd56 100644
+--- a/drivers/pci/pcie/aer/aerdrv.c
++++ b/drivers/pci/pcie/aer/aerdrv.c
+@@ -262,7 +262,6 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
+ rpc->rpd = dev;
+ INIT_WORK(&rpc->dpc_handler, aer_isr);
+ mutex_init(&rpc->rpc_mutex);
+- init_waitqueue_head(&rpc->wait_release);
+
+ /* Use PCIe bus function to store rpc into PCIe device */
+ set_service_data(dev, rpc);
+@@ -285,8 +284,7 @@ static void aer_remove(struct pcie_device *dev)
+ if (rpc->isr)
+ free_irq(dev->irq, dev);
+
+- wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx);
+-
++ flush_work(&rpc->dpc_handler);
+ aer_disable_rootport(rpc);
+ kfree(rpc);
+ set_service_data(dev, NULL);
+diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
+index 84420b7c9456..945c939a86c5 100644
+--- a/drivers/pci/pcie/aer/aerdrv.h
++++ b/drivers/pci/pcie/aer/aerdrv.h
+@@ -72,7 +72,6 @@ struct aer_rpc {
+ * recovery on the same
+ * root port hierarchy
+ */
+- wait_queue_head_t wait_release;
+ };
+
+ struct aer_broadcast_data {
+diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
+index 5653ea94547f..b60a325234c5 100644
+--- a/drivers/pci/pcie/aer/aerdrv_core.c
++++ b/drivers/pci/pcie/aer/aerdrv_core.c
+@@ -784,8 +784,6 @@ void aer_isr(struct work_struct *work)
+ while (get_e_source(rpc, &e_src))
+ aer_isr_one_error(p_device, &e_src);
+ mutex_unlock(&rpc->rpc_mutex);
+-
+- wake_up(&rpc->wait_release);
+ }
+
+ /**
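
The AER hunks above retire an open-coded shutdown handshake: instead of aer_remove() sleeping on a waitqueue until the ISR work reports prod_idx == cons_idx, it calls flush_work(&rpc->dpc_handler), which returns only once any queued or executing aer_isr() has completed; the waitqueue, its initialization, and the wake_up() all go away. A minimal kernel-style sketch, with illustrative names:

    #include <linux/workqueue.h>

    static void my_handler(struct work_struct *work)
    {
        /* ... drain queued error events ... */
    }
    static DECLARE_WORK(my_work, my_handler);

    static void my_remove(void)
    {
        /* After this returns, my_handler is neither queued nor running. */
        flush_work(&my_work);
        /* now safe to free resources my_handler uses */
    }
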
+diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
+index 2733112b3527..75c62907b99d 100644
+--- a/drivers/phy/phy-core.c
++++ b/drivers/phy/phy-core.c
+@@ -179,6 +179,7 @@ int phy_init(struct phy *phy)
+ ret = phy_pm_runtime_get_sync(phy);
+ if (ret < 0 && ret != -ENOTSUPP)
+ return ret;
++ ret = 0; /* Override possible ret == -ENOTSUPP */
+
+ mutex_lock(&phy->mutex);
+ if (phy->init_count == 0 && phy->ops->init) {
+@@ -187,8 +188,6 @@ int phy_init(struct phy *phy)
+ dev_err(&phy->dev, "phy init failed --> %d\n", ret);
+ goto out;
+ }
+- } else {
+- ret = 0; /* Override possible ret == -ENOTSUPP */
+ }
+ ++phy->init_count;
+
+@@ -209,6 +208,7 @@ int phy_exit(struct phy *phy)
+ ret = phy_pm_runtime_get_sync(phy);
+ if (ret < 0 && ret != -ENOTSUPP)
+ return ret;
++ ret = 0; /* Override possible ret == -ENOTSUPP */
+
+ mutex_lock(&phy->mutex);
+ if (phy->init_count == 1 && phy->ops->exit) {
+@@ -229,41 +229,42 @@ EXPORT_SYMBOL_GPL(phy_exit);
+
+ int phy_power_on(struct phy *phy)
+ {
+- int ret;
++ int ret = 0;
+
+ if (!phy)
+- return 0;
++ goto out;
+
+ if (phy->pwr) {
+ ret = regulator_enable(phy->pwr);
+ if (ret)
+- return ret;
++ goto out;
+ }
+
+ ret = phy_pm_runtime_get_sync(phy);
+ if (ret < 0 && ret != -ENOTSUPP)
+- return ret;
++ goto err_pm_sync;
++
++ ret = 0; /* Override possible ret == -ENOTSUPP */
+
+ mutex_lock(&phy->mutex);
+ if (phy->power_count == 0 && phy->ops->power_on) {
+ ret = phy->ops->power_on(phy);
+ if (ret < 0) {
+ dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
+- goto out;
++ goto err_pwr_on;
+ }
+- } else {
+- ret = 0; /* Override possible ret == -ENOTSUPP */
+ }
+ ++phy->power_count;
+ mutex_unlock(&phy->mutex);
+ return 0;
+
+-out:
++err_pwr_on:
+ mutex_unlock(&phy->mutex);
+ phy_pm_runtime_put_sync(phy);
++err_pm_sync:
+ if (phy->pwr)
+ regulator_disable(phy->pwr);
+-
++out:
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(phy_power_on);
+diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
+index 13dd070a69da..6ab230f1f513 100644
+--- a/drivers/phy/phy-twl4030-usb.c
++++ b/drivers/phy/phy-twl4030-usb.c
+@@ -753,6 +753,7 @@ static int twl4030_usb_remove(struct platform_device *pdev)
+ struct twl4030_usb *twl = platform_get_drvdata(pdev);
+ int val;
+
++ usb_remove_phy(&twl->phy);
+ pm_runtime_get_sync(twl->dev);
+ cancel_delayed_work(&twl->id_workaround_work);
+ device_remove_file(twl->dev, &dev_attr_vbus);
+@@ -760,6 +761,13 @@ static int twl4030_usb_remove(struct platform_device *pdev)
+ /* set transceiver mode to power on defaults */
+ twl4030_usb_set_mode(twl, -1);
+
++ /* idle ulpi before powering off */
++ if (cable_present(twl->linkstat))
++ pm_runtime_put_noidle(twl->dev);
++ pm_runtime_mark_last_busy(twl->dev);
++ pm_runtime_put_sync_suspend(twl->dev);
++ pm_runtime_disable(twl->dev);
++
+ /* autogate 60MHz ULPI clock,
+ * clear dpll clock request for i2c access,
+ * disable 32KHz
+@@ -774,11 +782,6 @@ static int twl4030_usb_remove(struct platform_device *pdev)
+ /* disable complete OTG block */
+ twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);
+
+- if (cable_present(twl->linkstat))
+- pm_runtime_put_noidle(twl->dev);
+- pm_runtime_mark_last_busy(twl->dev);
+- pm_runtime_put(twl->dev);
+-
+ return 0;
+ }
+
+diff --git a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c
+index 02bc5a6343c3..aa454241489c 100644
+--- a/drivers/platform/x86/intel_scu_ipcutil.c
++++ b/drivers/platform/x86/intel_scu_ipcutil.c
+@@ -49,7 +49,7 @@ struct scu_ipc_data {
+
+ static int scu_reg_access(u32 cmd, struct scu_ipc_data *data)
+ {
+- int count = data->count;
++ unsigned int count = data->count;
+
+ if (count == 0 || count == 3 || count > 4)
+ return -EINVAL;
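
The intel_scu_ipcutil hunk above changes the local count to unsigned: data->count comes from userspace, and assigning it to a signed int lets a huge value appear negative, slipping past the count > 4 test only to be used as a large length later; as unsigned, the same value trips the range check. A runnable illustration (the cast result shown is the usual two's-complement behavior):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t user_count = 0xFFFFFFFFu;   /* attacker-controlled */

        int s          = (int)user_count;    /* old code: signed temp, -1 */
        unsigned int u = user_count;         /* fixed code: unsigned temp */

        printf("signed:   rejected=%d\n", s == 0 || s == 3 || s > 4);  /* 0: slips through */
        printf("unsigned: rejected=%d\n", u == 0 || u == 3 || u > 4);  /* 1: caught */
        return 0;
    }
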
+diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
+index a2597e683e79..6a64e86e8ccd 100644
+--- a/drivers/s390/block/dasd_alias.c
++++ b/drivers/s390/block/dasd_alias.c
+@@ -264,8 +264,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
+ spin_unlock_irqrestore(&lcu->lock, flags);
+ cancel_work_sync(&lcu->suc_data.worker);
+ spin_lock_irqsave(&lcu->lock, flags);
+- if (device == lcu->suc_data.device)
++ if (device == lcu->suc_data.device) {
++ dasd_put_device(device);
+ lcu->suc_data.device = NULL;
++ }
+ }
+ was_pending = 0;
+ if (device == lcu->ruac_data.device) {
+@@ -273,8 +275,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
+ was_pending = 1;
+ cancel_delayed_work_sync(&lcu->ruac_data.dwork);
+ spin_lock_irqsave(&lcu->lock, flags);
+- if (device == lcu->ruac_data.device)
++ if (device == lcu->ruac_data.device) {
++ dasd_put_device(device);
+ lcu->ruac_data.device = NULL;
++ }
+ }
+ private->lcu = NULL;
+ spin_unlock_irqrestore(&lcu->lock, flags);
+@@ -549,8 +553,10 @@ static void lcu_update_work(struct work_struct *work)
+ if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
+ " alias data in lcu (rc = %d), retry later", rc);
+- schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
++ if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
++ dasd_put_device(device);
+ } else {
++ dasd_put_device(device);
+ lcu->ruac_data.device = NULL;
+ lcu->flags &= ~UPDATE_PENDING;
+ }
+@@ -593,8 +599,10 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
+ */
+ if (!usedev)
+ return -EINVAL;
++ dasd_get_device(usedev);
+ lcu->ruac_data.device = usedev;
+- schedule_delayed_work(&lcu->ruac_data.dwork, 0);
++ if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
++ dasd_put_device(usedev);
+ return 0;
+ }
+
+@@ -722,7 +730,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
+ ASCEBC((char *) &cqr->magic, 4);
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_RSCK;
+- ccw->flags = 0 ;
++ ccw->flags = CCW_FLAG_SLI;
+ ccw->count = 16;
+ ccw->cda = (__u32)(addr_t) cqr->data;
+ ((char *)cqr->data)[0] = reason;
+@@ -926,6 +934,7 @@ static void summary_unit_check_handling_work(struct work_struct *work)
+ /* 3. read new alias configuration */
+ _schedule_lcu_update(lcu, device);
+ lcu->suc_data.device = NULL;
++ dasd_put_device(device);
+ spin_unlock_irqrestore(&lcu->lock, flags);
+ }
+
+@@ -985,6 +994,8 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
+ }
+ lcu->suc_data.reason = reason;
+ lcu->suc_data.device = device;
++ dasd_get_device(device);
+ spin_unlock(&lcu->lock);
+- schedule_work(&lcu->suc_data.worker);
++ if (!schedule_work(&lcu->suc_data.worker))
++ dasd_put_device(device);
+ };
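
The dasd_alias.c hunks above make the worker references airtight: a dasd_get_device() is taken before each schedule_work()/schedule_delayed_work(), the worker drops it when done, and because both scheduling calls return false when the work was already pending, the just-taken reference is dropped immediately in that case so nothing leaks. A minimal kernel-style sketch of the pairing; get_ref/put_ref are illustrative stand-ins for dasd_get_device/dasd_put_device:

    #include <linux/workqueue.h>

    struct my_dev {
        struct work_struct work;
        /* refcount managed by get_ref()/put_ref(), defined elsewhere */
    };

    void get_ref(struct my_dev *dev);
    void put_ref(struct my_dev *dev);

    static void queue_dev_work(struct my_dev *dev)
    {
        get_ref(dev);                    /* pin the device for the worker */
        if (!schedule_work(&dev->work))  /* false: already queued, so our */
            put_ref(dev);                /*   pin won't be consumed       */
    }

    static void dev_worker(struct work_struct *work)
    {
        struct my_dev *dev = container_of(work, struct my_dev, work);

        /* ... handle the event ... */
        put_ref(dev);                    /* balance the pin from queue time */
    }
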
+diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
+index 1b5bc9293e37..cd4042a22a56 100644
+--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
++++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
+@@ -569,7 +569,7 @@ static int mode_select_handle_sense(struct scsi_device *sdev,
+ /*
+ * Command Lock contention
+ */
+- err = SCSI_DH_RETRY;
++ err = SCSI_DH_IMM_RETRY;
+ break;
+ default:
+ break;
+@@ -619,6 +619,8 @@ retry:
+ err = mode_select_handle_sense(sdev, h->sense);
+ if (err == SCSI_DH_RETRY && retry_cnt--)
+ goto retry;
++ if (err == SCSI_DH_IMM_RETRY)
++ goto retry;
+ }
+ if (err == SCSI_DH_OK) {
+ h->state = RDAC_STATE_ACTIVE;
+diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
+index d77fe43793b6..88ff7c3417ee 100644
+--- a/drivers/scsi/qla2xxx/qla_dbg.c
++++ b/drivers/scsi/qla2xxx/qla_dbg.c
+@@ -67,10 +67,10 @@
+ * | | | 0xd031-0xd0ff |
+ * | | | 0xd101-0xd1fe |
+ * | | | 0xd214-0xd2fe |
+- * | Target Mode | 0xe079 | |
+- * | Target Mode Management | 0xf072 | 0xf002 |
++ * | Target Mode | 0xe080 | |
++ * | Target Mode Management | 0xf096 | 0xf002 |
+ * | | | 0xf046-0xf049 |
+- * | Target Mode Task Management | 0x1000b | |
++ * | Target Mode Task Management | 0x1000d | |
+ * ----------------------------------------------------------------------
+ */
+
+diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
+index 5f6b2960cccb..c03d33f4df3a 100644
+--- a/drivers/scsi/qla2xxx/qla_def.h
++++ b/drivers/scsi/qla2xxx/qla_def.h
+@@ -274,6 +274,7 @@
+ #define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/
+
+ struct req_que;
++struct qla_tgt_sess;
+
+ /*
+ * (sd.h is not exported, hence local inclusion)
+@@ -2026,6 +2027,7 @@ typedef struct fc_port {
+ uint16_t port_id;
+
+ unsigned long retry_delay_timestamp;
++ struct qla_tgt_sess *tgt_session;
+ } fc_port_t;
+
+ #include "qla_mr.h"
+@@ -3576,6 +3578,16 @@ typedef struct scsi_qla_host {
+ uint16_t fcoe_fcf_idx;
+ uint8_t fcoe_vn_port_mac[6];
+
++ /* list of commands waiting on workqueue */
++ struct list_head qla_cmd_list;
++ struct list_head qla_sess_op_cmd_list;
++ spinlock_t cmd_list_lock;
++
++ /* Counter to detect races between ELS and RSCN events */
++ atomic_t generation_tick;
++ /* Time when global fcport update has been scheduled */
++ int total_fcport_update_gen;
++
+ uint32_t vp_abort_cnt;
+
+ struct fc_vport *fc_vport; /* holds fc_vport * for each vport */
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index a4dde7e80dbd..c919ac042593 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -115,6 +115,8 @@ qla2x00_async_iocb_timeout(void *data)
+ QLA_LOGIO_LOGIN_RETRIED : 0;
+ qla2x00_post_async_login_done_work(fcport->vha, fcport,
+ lio->u.logio.data);
++ } else if (sp->type == SRB_LOGOUT_CMD) {
++ qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
+ }
+ }
+
+@@ -497,7 +499,10 @@ void
+ qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
+ uint16_t *data)
+ {
+- qla2x00_mark_device_lost(vha, fcport, 1, 0);
++ /* Don't re-login in target mode */
++ if (!fcport->tgt_session)
++ qla2x00_mark_device_lost(vha, fcport, 1, 0);
++ qlt_logo_completion_handler(fcport, data[0]);
+ return;
+ }
+
+@@ -2141,7 +2146,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
+ /* Clear outstanding commands array. */
+ for (que = 0; que < ha->max_req_queues; que++) {
+ req = ha->req_q_map[que];
+- if (!req)
++ if (!req || !test_bit(que, ha->req_qid_map))
+ continue;
+ req->out_ptr = (void *)(req->ring + req->length);
+ *req->out_ptr = 0;
+@@ -2158,7 +2163,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
+
+ for (que = 0; que < ha->max_rsp_queues; que++) {
+ rsp = ha->rsp_q_map[que];
+- if (!rsp)
++ if (!rsp || !test_bit(que, ha->rsp_qid_map))
+ continue;
+ rsp->in_ptr = (void *)(rsp->ring + rsp->length);
+ *rsp->in_ptr = 0;
+@@ -2871,21 +2876,14 @@ qla2x00_rport_del(void *data)
+ {
+ fc_port_t *fcport = data;
+ struct fc_rport *rport;
+- scsi_qla_host_t *vha = fcport->vha;
+ unsigned long flags;
+
+ spin_lock_irqsave(fcport->vha->host->host_lock, flags);
+ rport = fcport->drport ? fcport->drport: fcport->rport;
+ fcport->drport = NULL;
+ spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
+- if (rport) {
++ if (rport)
+ fc_remote_port_delete(rport);
+- /*
+- * Release the target mode FC NEXUS in qla_target.c code
+- * if target mod is enabled.
+- */
+- qlt_fc_port_deleted(vha, fcport);
+- }
+ }
+
+ /**
+@@ -3254,6 +3252,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
+ * Create target mode FC NEXUS in qla_target.c if target mode is
+ * enabled..
+ */
++
+ qlt_fc_port_added(vha, fcport);
+
+ spin_lock_irqsave(fcport->vha->host->host_lock, flags);
+@@ -3326,6 +3325,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
+ LIST_HEAD(new_fcports);
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
++ int discovery_gen;
+
+ /* If FL port exists, then SNS is present */
+ if (IS_FWI2_CAPABLE(ha))
+@@ -3396,6 +3396,14 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
+ fcport->scan_state = QLA_FCPORT_SCAN;
+ }
+
++ /* Mark the time right before querying FW for connected ports.
++ * This process is long, asynchronous and by the time it's done,
++ * collected information might not be accurate anymore. E.g.
++ * disconnected port might have re-connected and a brand new
++ * session has been created. In this case session's generation
++ * will be newer than discovery_gen. */
++ qlt_do_generation_tick(vha, &discovery_gen);
++
+ rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
+ if (rval != QLA_SUCCESS)
+ break;
+@@ -3411,20 +3419,44 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
+ if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
+ continue;
+
+- if (fcport->scan_state == QLA_FCPORT_SCAN &&
+- atomic_read(&fcport->state) == FCS_ONLINE) {
+- qla2x00_mark_device_lost(vha, fcport,
+- ql2xplogiabsentdevice, 0);
+- if (fcport->loop_id != FC_NO_LOOP_ID &&
+- (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
+- fcport->port_type != FCT_INITIATOR &&
+- fcport->port_type != FCT_BROADCAST) {
+- ha->isp_ops->fabric_logout(vha,
+- fcport->loop_id,
+- fcport->d_id.b.domain,
+- fcport->d_id.b.area,
+- fcport->d_id.b.al_pa);
+- qla2x00_clear_loop_id(fcport);
++ if (fcport->scan_state == QLA_FCPORT_SCAN) {
++ if (qla_ini_mode_enabled(base_vha) &&
++ atomic_read(&fcport->state) == FCS_ONLINE) {
++ qla2x00_mark_device_lost(vha, fcport,
++ ql2xplogiabsentdevice, 0);
++ if (fcport->loop_id != FC_NO_LOOP_ID &&
++ (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
++ fcport->port_type != FCT_INITIATOR &&
++ fcport->port_type != FCT_BROADCAST) {
++ ha->isp_ops->fabric_logout(vha,
++ fcport->loop_id,
++ fcport->d_id.b.domain,
++ fcport->d_id.b.area,
++ fcport->d_id.b.al_pa);
++ qla2x00_clear_loop_id(fcport);
++ }
++ } else if (!qla_ini_mode_enabled(base_vha)) {
++ /*
++ * In target mode, explicitly kill
++ * sessions and log out of devices
++ * that are gone, so that we don't
++ * end up with an initiator using the
++ * wrong ACL (if the fabric recycles
++ * an FC address and we have a stale
++ * session around) and so that we don't
++ * report initiators that are no longer
++ * on the fabric.
++ */
++ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf077,
++ "port gone, logging out/killing session: "
++ "%8phC state 0x%x flags 0x%x fc4_type 0x%x "
++ "scan_state %d\n",
++ fcport->port_name,
++ atomic_read(&fcport->state),
++ fcport->flags, fcport->fc4_type,
++ fcport->scan_state);
++ qlt_fc_port_deleted(vha, fcport,
++ discovery_gen);
+ }
+ }
+ }
+@@ -3445,6 +3477,28 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
+ (fcport->flags & FCF_LOGIN_NEEDED) == 0)
+ continue;
+
++ /*
++ * If we're not an initiator, skip looking for devices
++ * and logging in. There's no reason for us to do it,
++ * and it seems to actively cause problems in target
++ * mode if we race with the initiator logging into us
++ * (we might get the "port ID used" status back from
++ * our login command and log out the initiator, which
++ * seems to cause havoc).
++ */
++ if (!qla_ini_mode_enabled(base_vha)) {
++ if (fcport->scan_state == QLA_FCPORT_FOUND) {
++ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf078,
++ "port %8phC state 0x%x flags 0x%x fc4_type 0x%x "
++ "scan_state %d (initiator mode disabled; skipping "
++ "login)\n", fcport->port_name,
++ atomic_read(&fcport->state),
++ fcport->flags, fcport->fc4_type,
++ fcport->scan_state);
++ }
++ continue;
++ }
++
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
+ fcport->loop_id = next_loopid;
+ rval = qla2x00_find_new_loop_id(
+@@ -3471,16 +3525,38 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
+ test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ break;
+
+- /* Find a new loop ID to use. */
+- fcport->loop_id = next_loopid;
+- rval = qla2x00_find_new_loop_id(base_vha, fcport);
+- if (rval != QLA_SUCCESS) {
+- /* Ran out of IDs to use */
+- break;
+- }
++ /*
++ * If we're not an initiator, skip looking for devices
++ * and logging in. There's no reason for us to do it,
++ * and it seems to actively cause problems in target
++ * mode if we race with the initiator logging into us
++ * (we might get the "port ID used" status back from
++ * our login command and log out the initiator, which
++ * seems to cause havoc).
++ */
++ if (qla_ini_mode_enabled(base_vha)) {
++ /* Find a new loop ID to use. */
++ fcport->loop_id = next_loopid;
++ rval = qla2x00_find_new_loop_id(base_vha,
++ fcport);
++ if (rval != QLA_SUCCESS) {
++ /* Ran out of IDs to use */
++ break;
++ }
+
+- /* Login and update database */
+- qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
++ /* Login and update database */
++ qla2x00_fabric_dev_login(vha, fcport,
++ &next_loopid);
++ } else {
++ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf079,
++ "new port %8phC state 0x%x flags 0x%x fc4_type "
++ "0x%x scan_state %d (initiator mode disabled; "
++ "skipping login)\n",
++ fcport->port_name,
++ atomic_read(&fcport->state),
++ fcport->flags, fcport->fc4_type,
++ fcport->scan_state);
++ }
+
+ list_move_tail(&fcport->list, &vha->vp_fcports);
+ }
+@@ -3676,11 +3752,12 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
+ fcport->fp_speed = new_fcport->fp_speed;
+
+ /*
+- * If address the same and state FCS_ONLINE, nothing
+- * changed.
++ * If address the same and state FCS_ONLINE
++ * (or in target mode), nothing changed.
+ */
+ if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
+- atomic_read(&fcport->state) == FCS_ONLINE) {
++ (atomic_read(&fcport->state) == FCS_ONLINE ||
++ !qla_ini_mode_enabled(base_vha))) {
+ break;
+ }
+
+@@ -3700,6 +3777,22 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
+ * Log it out if still logged in and mark it for
+ * relogin later.
+ */
++ if (!qla_ini_mode_enabled(base_vha)) {
++ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
++ "port changed FC ID, %8phC"
++ " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
++ fcport->port_name,
++ fcport->d_id.b.domain,
++ fcport->d_id.b.area,
++ fcport->d_id.b.al_pa,
++ fcport->loop_id,
++ new_fcport->d_id.b.domain,
++ new_fcport->d_id.b.area,
++ new_fcport->d_id.b.al_pa);
++ fcport->d_id.b24 = new_fcport->d_id.b24;
++ break;
++ }
++
+ fcport->d_id.b24 = new_fcport->d_id.b24;
+ fcport->flags |= FCF_LOGIN_NEEDED;
+ if (fcport->loop_id != FC_NO_LOOP_ID &&
+@@ -3719,6 +3812,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
+ if (found)
+ continue;
+ /* If device was not in our fcports list, then add it. */
++ new_fcport->scan_state = QLA_FCPORT_FOUND;
+ list_add_tail(&new_fcport->list, new_fcports);
+
+ /* Allocate a new replacement fcport. */
+@@ -4139,6 +4233,14 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
+ atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+ qla2x00_rport_del(fcport);
++
++ /*
++ * Release the target mode FC NEXUS in
++ * qla_target.c, if target mod is enabled.
++ */
++ qlt_fc_port_deleted(vha, fcport,
++ base_vha->total_fcport_update_gen);
++
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ }
+ }
+@@ -4806,7 +4908,7 @@ qla25xx_init_queues(struct qla_hw_data *ha)
+
+ for (i = 1; i < ha->max_rsp_queues; i++) {
+ rsp = ha->rsp_q_map[i];
+- if (rsp) {
++ if (rsp && test_bit(i, ha->rsp_qid_map)) {
+ rsp->options &= ~BIT_0;
+ ret = qla25xx_init_rsp_que(base_vha, rsp);
+ if (ret != QLA_SUCCESS)
+@@ -4821,8 +4923,8 @@ qla25xx_init_queues(struct qla_hw_data *ha)
+ }
+ for (i = 1; i < ha->max_req_queues; i++) {
+ req = ha->req_q_map[i];
+- if (req) {
+- /* Clear outstanding commands array. */
++ if (req && test_bit(i, ha->req_qid_map)) {
++ /* Clear outstanding commands array. */
+ req->options &= ~BIT_0;
+ ret = qla25xx_init_req_que(base_vha, req);
+ if (ret != QLA_SUCCESS)
+diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
+index f0edb07f3198..9788d30b21bf 100644
+--- a/drivers/scsi/qla2xxx/qla_iocb.c
++++ b/drivers/scsi/qla2xxx/qla_iocb.c
+@@ -1998,6 +1998,9 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
+ logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
+ logio->control_flags =
+ cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
++ if (!sp->fcport->tgt_session ||
++ !sp->fcport->tgt_session->keep_nport_handle)
++ logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
+ logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ logio->port_id[0] = sp->fcport->d_id.b.al_pa;
+ logio->port_id[1] = sp->fcport->d_id.b.area;
+diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
+index a04a1b1f7f32..e19117766369 100644
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -2981,9 +2981,9 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
+ "MSI-X: Failed to enable support "
+ "-- %d/%d\n Retry with %d vectors.\n",
+ ha->msix_count, ret, ret);
++ ha->msix_count = ret;
++ ha->max_rsp_queues = ha->msix_count - 1;
+ }
+- ha->msix_count = ret;
+- ha->max_rsp_queues = ha->msix_count - 1;
+ ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
+ ha->msix_count, GFP_KERNEL);
+ if (!ha->msix_entries) {
+diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
+index 5c2e0317f1c0..7c0b33d21f24 100644
+--- a/drivers/scsi/qla2xxx/qla_mid.c
++++ b/drivers/scsi/qla2xxx/qla_mid.c
+@@ -595,7 +595,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
+ /* Delete request queues */
+ for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
+ req = ha->req_q_map[cnt];
+- if (req) {
++ if (req && test_bit(cnt, ha->req_qid_map)) {
+ ret = qla25xx_delete_req_que(vha, req);
+ if (ret != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x00ea,
+@@ -609,7 +609,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
+ /* Delete response queues */
+ for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
+ rsp = ha->rsp_q_map[cnt];
+- if (rsp) {
++ if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
+ ret = qla25xx_delete_rsp_que(vha, rsp);
+ if (ret != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x00eb,
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index db3dbd999cb6..e85978248323 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -399,6 +399,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
+ int cnt;
+
+ for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
++ if (!test_bit(cnt, ha->req_qid_map))
++ continue;
++
+ req = ha->req_q_map[cnt];
+ qla2x00_free_req_que(ha, req);
+ }
+@@ -406,6 +409,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
+ ha->req_q_map = NULL;
+
+ for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
++ if (!test_bit(cnt, ha->rsp_qid_map))
++ continue;
++
+ rsp = ha->rsp_q_map[cnt];
+ qla2x00_free_rsp_que(ha, rsp);
+ }
+@@ -735,7 +741,9 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
+ * Return target busy if we've received a non-zero retry_delay_timer
+ * in a FCP_RSP.
+ */
+- if (time_after(jiffies, fcport->retry_delay_timestamp))
++ if (fcport->retry_delay_timestamp == 0) {
++ /* retry delay not set */
++ } else if (time_after(jiffies, fcport->retry_delay_timestamp))
+ fcport->retry_delay_timestamp = 0;
+ else
+ goto qc24_target_busy;
+@@ -3301,11 +3309,14 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
+ spin_lock_irqsave(vha->host->host_lock, flags);
+ fcport->drport = rport;
+ spin_unlock_irqrestore(vha->host->host_lock, flags);
++ qlt_do_generation_tick(vha, &base_vha->total_fcport_update_gen);
+ set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
+ qla2xxx_wake_dpc(base_vha);
+ } else {
++ int now;
+ fc_remote_port_delete(rport);
+- qlt_fc_port_deleted(vha, fcport);
++ qlt_do_generation_tick(vha, &now);
++ qlt_fc_port_deleted(vha, fcport, now);
+ }
+ }
+
+@@ -3835,8 +3846,11 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
+ INIT_LIST_HEAD(&vha->vp_fcports);
+ INIT_LIST_HEAD(&vha->work_list);
+ INIT_LIST_HEAD(&vha->list);
++ INIT_LIST_HEAD(&vha->qla_cmd_list);
++ INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list);
+
+ spin_lock_init(&vha->work_lock);
++ spin_lock_init(&vha->cmd_list_lock);
+
+ sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
+ ql_dbg(ql_dbg_init, vha, 0x0041,
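
The qla_os.c queuecommand hunk above stops feeding 0 to time_after(): retry_delay_timestamp == 0 means no delay was ever armed, but time_after(jiffies, 0) is an absolute comparison that reads false whenever jiffies sits in the upper half of its range — which it does right after boot, since the kernel deliberately initializes jiffies a few minutes before the wrap point — so every command would have bounced as target-busy. The explicit == 0 test sidesteps that; time_after() is only wrap-safe between two nearby timestamps. A userspace rendition of the macro's arithmetic and the failure:

    #include <stdio.h>

    /* Same arithmetic as the kernel's time_after(a, b). */
    #define time_after(a, b) ((long)((b) - (a)) < 0)

    int main(void)
    {
        unsigned long jiffies = (~0UL >> 1) + 2;  /* just past the signed midpoint */

        /* An absolute 0 is half the counter range away: "after" reads false. */
        printf("vs 0:          %d\n", time_after(jiffies, 0UL));

        /* A real, recent timestamp compares correctly across the wrap. */
        unsigned long stamp = jiffies - 10;
        printf("vs jiffies-10: %d\n", time_after(jiffies, stamp));
        return 0;
    }
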
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index a902fa1db7af..9f296dfeeb7f 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -113,6 +113,11 @@ static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
+ static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
+ struct atio_from_isp *atio, uint16_t status, int qfull);
+ static void qlt_disable_vha(struct scsi_qla_host *vha);
++static void qlt_clear_tgt_db(struct qla_tgt *tgt);
++static void qlt_send_notify_ack(struct scsi_qla_host *vha,
++ struct imm_ntfy_from_isp *ntfy,
++ uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
++ uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
+ /*
+ * Global Variables
+ */
+@@ -122,6 +127,16 @@ static struct workqueue_struct *qla_tgt_wq;
+ static DEFINE_MUTEX(qla_tgt_mutex);
+ static LIST_HEAD(qla_tgt_glist);
+
++/* This API intentionally takes dest as a parameter, rather than returning
++ * int value to avoid caller forgetting to issue wmb() after the store */
++void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
++{
++ scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
++ *dest = atomic_inc_return(&base_vha->generation_tick);
++ /* memory barrier */
++ wmb();
++}
++
+ /* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
+ static struct qla_tgt_sess *qlt_find_sess_by_port_name(
+ struct qla_tgt *tgt,
+@@ -381,14 +396,73 @@ static void qlt_free_session_done(struct work_struct *work)
+ struct qla_tgt *tgt = sess->tgt;
+ struct scsi_qla_host *vha = sess->vha;
+ struct qla_hw_data *ha = vha->hw;
++ unsigned long flags;
++ bool logout_started = false;
++ fc_port_t fcport;
++
++ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
++ "%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
++ " s_id %02x:%02x:%02x logout %d keep %d plogi %d\n",
++ __func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
++ sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
++ sess->logout_on_delete, sess->keep_nport_handle,
++ sess->plogi_ack_needed);
+
+ BUG_ON(!tgt);
++
++ if (sess->logout_on_delete) {
++ int rc;
++
++ memset(&fcport, 0, sizeof(fcport));
++ fcport.loop_id = sess->loop_id;
++ fcport.d_id = sess->s_id;
++ memcpy(fcport.port_name, sess->port_name, WWN_SIZE);
++ fcport.vha = vha;
++ fcport.tgt_session = sess;
++
++ rc = qla2x00_post_async_logout_work(vha, &fcport, NULL);
++ if (rc != QLA_SUCCESS)
++ ql_log(ql_log_warn, vha, 0xf085,
++ "Schedule logo failed sess %p rc %d\n",
++ sess, rc);
++ else
++ logout_started = true;
++ }
++
+ /*
+ * Release the target session for FC Nexus from fabric module code.
+ */
+ if (sess->se_sess != NULL)
+ ha->tgt.tgt_ops->free_session(sess);
+
++ if (logout_started) {
++ bool traced = false;
++
++ while (!ACCESS_ONCE(sess->logout_completed)) {
++ if (!traced) {
++ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
++ "%s: waiting for sess %p logout\n",
++ __func__, sess);
++ traced = true;
++ }
++ msleep(100);
++ }
++
++ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf087,
++ "%s: sess %p logout completed\n",
++ __func__, sess);
++ }
++
++ spin_lock_irqsave(&ha->hardware_lock, flags);
++
++ if (sess->plogi_ack_needed)
++ qlt_send_notify_ack(vha, &sess->tm_iocb,
++ 0, 0, 0, 0, 0, 0);
++
++ list_del(&sess->sess_list_entry);
++
++ spin_unlock_irqrestore(&ha->hardware_lock, flags);
++
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
+ "Unregistration of sess %p finished\n", sess);
+
+@@ -409,9 +483,9 @@ void qlt_unreg_sess(struct qla_tgt_sess *sess)
+
+ vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
+
+- list_del(&sess->sess_list_entry);
+- if (sess->deleted)
+- list_del(&sess->del_list_entry);
++ if (!list_empty(&sess->del_list_entry))
++ list_del_init(&sess->del_list_entry);
++ sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
+
+ INIT_WORK(&sess->free_work, qlt_free_session_done);
+ schedule_work(&sess->free_work);
+@@ -431,10 +505,10 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
+
+ loop_id = le16_to_cpu(n->u.isp24.nport_handle);
+ if (loop_id == 0xFFFF) {
+-#if 0 /* FIXME: Re-enable Global event handling.. */
+ /* Global event */
+- atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
+- qlt_clear_tgt_db(ha->tgt.qla_tgt);
++ atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
++ qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
++#if 0 /* FIXME: do we need to choose a session here? */
+ if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
+ sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
+ typeof(*sess), sess_list_entry);
+@@ -489,27 +563,38 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
+ struct qla_tgt *tgt = sess->tgt;
+ uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
+
+- if (sess->deleted)
+- return;
++ if (sess->deleted) {
++ /* Upgrade to unconditional deletion in case it was temporary */
++ if (immediate && sess->deleted == QLA_SESS_DELETION_PENDING)
++ list_del(&sess->del_list_entry);
++ else
++ return;
++ }
+
+ ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
+ "Scheduling sess %p for deletion\n", sess);
+- list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
+- sess->deleted = 1;
+
+- if (immediate)
++ if (immediate) {
+ dev_loss_tmo = 0;
++ sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
++ list_add(&sess->del_list_entry, &tgt->del_sess_list);
++ } else {
++ sess->deleted = QLA_SESS_DELETION_PENDING;
++ list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
++ }
+
+ sess->expires = jiffies + dev_loss_tmo * HZ;
+
+ ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
+- "qla_target(%d): session for port %8phC (loop ID %d) scheduled for "
+- "deletion in %u secs (expires: %lu) immed: %d\n",
+- sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo,
+- sess->expires, immediate);
++ "qla_target(%d): session for port %8phC (loop ID %d s_id %02x:%02x:%02x)"
++ " scheduled for deletion in %u secs (expires: %lu) immed: %d, logout: %d, gen: %#x\n",
++ sess->vha->vp_idx, sess->port_name, sess->loop_id,
++ sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
++ dev_loss_tmo, sess->expires, immediate, sess->logout_on_delete,
++ sess->generation);
+
+ if (immediate)
+- schedule_delayed_work(&tgt->sess_del_work, 0);
++ mod_delayed_work(system_wq, &tgt->sess_del_work, 0);
+ else
+ schedule_delayed_work(&tgt->sess_del_work,
+ sess->expires - jiffies);
+@@ -578,9 +663,9 @@ out_free_id_list:
+ /* ha->hardware_lock supposed to be held on entry */
+ static void qlt_undelete_sess(struct qla_tgt_sess *sess)
+ {
+- BUG_ON(!sess->deleted);
++ BUG_ON(sess->deleted != QLA_SESS_DELETION_PENDING);
+
+- list_del(&sess->del_list_entry);
++ list_del_init(&sess->del_list_entry);
+ sess->deleted = 0;
+ }
+
+@@ -599,7 +684,9 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
+ del_list_entry);
+ elapsed = jiffies;
+ if (time_after_eq(elapsed, sess->expires)) {
+- qlt_undelete_sess(sess);
++ /* No turning back */
++ list_del_init(&sess->del_list_entry);
++ sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
+ "Timeout: sess %p about to be deleted\n",
+@@ -643,6 +730,13 @@ static struct qla_tgt_sess *qlt_create_sess(
+ fcport->d_id.b.al_pa, fcport->d_id.b.area,
+ fcport->loop_id);
+
++ /* Cannot undelete at this point */
++ if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
++ spin_unlock_irqrestore(&ha->hardware_lock,
++ flags);
++ return NULL;
++ }
++
+ if (sess->deleted)
+ qlt_undelete_sess(sess);
+
+@@ -652,6 +746,9 @@ static struct qla_tgt_sess *qlt_create_sess(
+
+ if (sess->local && !local)
+ sess->local = 0;
++
++ qlt_do_generation_tick(vha, &sess->generation);
++
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return sess;
+@@ -673,6 +770,14 @@ static struct qla_tgt_sess *qlt_create_sess(
+ sess->s_id = fcport->d_id;
+ sess->loop_id = fcport->loop_id;
+ sess->local = local;
++ INIT_LIST_HEAD(&sess->del_list_entry);
++
++ /* Under normal circumstances we want to logout from firmware when
++ * session eventually ends and release corresponding nport handle.
++ * In the exception cases (e.g. when new PLOGI is waiting) corresponding
++ * code will adjust these flags as necessary. */
++ sess->logout_on_delete = 1;
++ sess->keep_nport_handle = 0;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
+ "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
+@@ -705,6 +810,7 @@ static struct qla_tgt_sess *qlt_create_sess(
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
+ vha->vha_tgt.qla_tgt->sess_count++;
++ qlt_do_generation_tick(vha, &sess->generation);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
+@@ -718,7 +824,7 @@ static struct qla_tgt_sess *qlt_create_sess(
+ }
+
+ /*
+- * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
++ * Called from qla2x00_reg_remote_port()
+ */
+ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
+ {
+@@ -750,6 +856,10 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
++ } else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
++ /* Point of no return */
++ spin_unlock_irqrestore(&ha->hardware_lock, flags);
++ return;
+ } else {
+ kref_get(&sess->se_sess->sess_kref);
+
+@@ -780,27 +890,36 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ }
+
+-void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
++/*
++ * max_gen - specifies maximum session generation
++ * at which this deletion request is still valid
++ */
++void
++qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
+ {
+- struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ struct qla_tgt_sess *sess;
+- unsigned long flags;
+
+ if (!vha->hw->tgt.tgt_ops)
+ return;
+
+- if (!tgt || (fcport->port_type != FCT_INITIATOR))
++ if (!tgt)
+ return;
+
+- spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (tgt->tgt_stop) {
+- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return;
+ }
+ sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
+ if (!sess) {
+- spin_unlock_irqrestore(&ha->hardware_lock, flags);
++ return;
++ }
++
++ if (max_gen - sess->generation < 0) {
++ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
++ "Ignoring stale deletion request for se_sess %p / sess %p"
++ " for port %8phC, req_gen %d, sess_gen %d\n",
++ sess->se_sess, sess, sess->port_name, max_gen,
++ sess->generation);
+ return;
+ }
+
+@@ -808,7 +927,6 @@ void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
+
+ sess->local = 1;
+ qlt_schedule_sess_for_deletion(sess, false);
+- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ }
+
+ static inline int test_tgt_sess_count(struct qla_tgt *tgt)
+@@ -1175,6 +1293,70 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
+ FCP_TMF_CMPL, true);
+ }
+
++static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
++{
++ struct qla_tgt_sess_op *op;
++ struct qla_tgt_cmd *cmd;
++
++ spin_lock(&vha->cmd_list_lock);
++
++ list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
++ if (tag == op->atio.u.isp24.exchange_addr) {
++ op->aborted = true;
++ spin_unlock(&vha->cmd_list_lock);
++ return 1;
++ }
++ }
++
++ list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
++ if (tag == cmd->atio.u.isp24.exchange_addr) {
++ cmd->state = QLA_TGT_STATE_ABORTED;
++ spin_unlock(&vha->cmd_list_lock);
++ return 1;
++ }
++ }
++
++ spin_unlock(&vha->cmd_list_lock);
++ return 0;
++}
++
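
/*
 * Annotation (not part of the patch): abort_cmd_for_tag() only *marks* the
 * command; the workqueue item that later dequeues it (qlt_do_work() or
 * qlt_create_sess_from_atio()) sees the aborted state and terminates the
 * exchange itself. That is why the ABTS handler below may answer with
 * FCP_TMF_CMPL as soon as this returns 1.
 */
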
++/* drop cmds for the given lun
++ * XXX only looks for cmds on the port through which the lun reset was received
++ * XXX does not go through the lists of other ports (which may have cmds
++ * for the same lun)
++ */
++static void abort_cmds_for_lun(struct scsi_qla_host *vha,
++ uint32_t lun, uint8_t *s_id)
++{
++ struct qla_tgt_sess_op *op;
++ struct qla_tgt_cmd *cmd;
++ uint32_t key;
++
++ key = sid_to_key(s_id);
++ spin_lock(&vha->cmd_list_lock);
++ list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
++ uint32_t op_key;
++ uint32_t op_lun;
++
++ op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
++ op_lun = scsilun_to_int(
++ (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
++ if (op_key == key && op_lun == lun)
++ op->aborted = true;
++ }
++ list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
++ uint32_t cmd_key;
++ uint32_t cmd_lun;
++
++ cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
++ cmd_lun = scsilun_to_int(
++ (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
++ if (cmd_key == key && cmd_lun == lun)
++ cmd->state = QLA_TGT_STATE_ABORTED;
++ }
++ spin_unlock(&vha->cmd_list_lock);
++}
++
+ /* ha->hardware_lock supposed to be held on entry */
+ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
+ struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
+@@ -1199,8 +1381,19 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
+ }
+ spin_unlock(&se_sess->sess_cmd_lock);
+
+- if (!found_lun)
+- return -ENOENT;
++ /* cmd not in LIO lists, look in qla list */
++ if (!found_lun) {
++ if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
++ /* send TASK_ABORT response immediately */
++ qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_CMPL, false);
++ return 0;
++ } else {
++ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081,
++ "unable to find cmd in driver or LIO for tag 0x%x\n",
++ abts->exchange_addr_to_abort);
++ return -ENOENT;
++ }
++ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
+ "qla_target(%d): task abort (tag=%d)\n",
+@@ -1284,6 +1477,11 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
+ return;
+ }
+
++ if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
++ qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
++ return;
++ }
++
+ rc = __qlt_24xx_handle_abts(vha, abts, sess);
+ if (rc != 0) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
+@@ -1726,21 +1924,6 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
+ struct qla_hw_data *ha = vha->hw;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+
+- if (unlikely(cmd->aborted)) {
+- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
+- "qla_target(%d): terminating exchange "
+- "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd,
+- se_cmd, cmd->tag);
+-
+- cmd->state = QLA_TGT_STATE_ABORTED;
+- cmd->cmd_flags |= BIT_6;
+-
+- qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
+-
+- /* !! At this point cmd could be already freed !! */
+- return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
+- }
+-
+ prm->cmd = cmd;
+ prm->tgt = tgt;
+ prm->rq_result = scsi_status;
+@@ -2303,6 +2486,19 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
+ unsigned long flags = 0;
+ int res;
+
++ spin_lock_irqsave(&ha->hardware_lock, flags);
++ if (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
++ cmd->state = QLA_TGT_STATE_PROCESSED;
++ if (cmd->sess->logout_completed)
++ /* no need to terminate. FW already freed exchange. */
++ qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
++ else
++ qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
++ spin_unlock_irqrestore(&ha->hardware_lock, flags);
++ return 0;
++ }
++ spin_unlock_irqrestore(&ha->hardware_lock, flags);
++
+ memset(&prm, 0, sizeof(prm));
+ qlt_check_srr_debug(cmd, &xmit_type);
+
+@@ -2315,9 +2511,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
+ res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
+ &full_req_cnt);
+ if (unlikely(res != 0)) {
+- if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
+- return 0;
+-
+ return res;
+ }
+
+@@ -2463,7 +2656,8 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+- if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
++ if (qla2x00_reset_active(vha) || (cmd->reset_count != ha->chip_reset) ||
++ (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) {
+ /*
+ * Either a chip reset is active or this request was from
+ * previous life, just abort the processing.
+@@ -2653,6 +2847,89 @@ out:
+
+ /* If hardware_lock held on entry, might drop it, then reacquire */
+ /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
++static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
++ struct imm_ntfy_from_isp *ntfy)
++{
++ struct nack_to_isp *nack;
++ struct qla_hw_data *ha = vha->hw;
++ request_t *pkt;
++ int ret = 0;
++
++ ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
++ "Sending TERM ELS CTIO (ha=%p)\n", ha);
++
++ pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
++ if (pkt == NULL) {
++ ql_dbg(ql_dbg_tgt, vha, 0xe080,
++ "qla_target(%d): %s failed: unable to allocate "
++ "request packet\n", vha->vp_idx, __func__);
++ return -ENOMEM;
++ }
++
++ pkt->entry_type = NOTIFY_ACK_TYPE;
++ pkt->entry_count = 1;
++ pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
++
++ nack = (struct nack_to_isp *)pkt;
++ nack->ox_id = ntfy->ox_id;
++
++ nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
++ if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
++ nack->u.isp24.flags = ntfy->u.isp24.flags &
++ __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
++ }
++
++ /* terminate */
++ nack->u.isp24.flags |=
++ __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);
++
++ nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
++ nack->u.isp24.status = ntfy->u.isp24.status;
++ nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
++ nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
++ nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
++ nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
++ nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
++ nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
++
++ qla2x00_start_iocbs(vha, vha->req);
++ return ret;
++}
++
++static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
++ struct imm_ntfy_from_isp *imm, int ha_locked)
++{
++ unsigned long flags = 0;
++ int rc;
++
++ if (qlt_issue_marker(vha, ha_locked) < 0)
++ return;
++
++ if (ha_locked) {
++ rc = __qlt_send_term_imm_notif(vha, imm);
++
++#if 0 /* Todo */
++ if (rc == -ENOMEM)
++ qlt_alloc_qfull_cmd(vha, imm, 0, 0);
++#endif
++ goto done;
++ }
++
++ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
++ rc = __qlt_send_term_imm_notif(vha, imm);
++
++#if 0 /* Todo */
++ if (rc == -ENOMEM)
++ qlt_alloc_qfull_cmd(vha, imm, 0, 0);
++#endif
++
++done:
++ if (!ha_locked)
++ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
++}
++
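
/*
 * Annotation (not part of the patch): both branches above funnel into
 * __qlt_send_term_imm_notif(); ha_locked only tells this wrapper whether
 * the caller already holds hardware_lock or it must be taken here.
 */
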
++/* If hardware_lock held on entry, might drop it, then reacquire */
++/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
+ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
+ struct qla_tgt_cmd *cmd,
+ struct atio_from_isp *atio)
+@@ -2794,6 +3071,24 @@ static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
+
+ }
+
++void qlt_abort_cmd(struct qla_tgt_cmd *cmd)
++{
++ struct qla_tgt *tgt = cmd->tgt;
++ struct scsi_qla_host *vha = tgt->vha;
++ struct se_cmd *se_cmd = &cmd->se_cmd;
++
++ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
++ "qla_target(%d): terminating exchange for aborted cmd=%p "
++ "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
++ cmd->tag);
++
++ cmd->state = QLA_TGT_STATE_ABORTED;
++ cmd->cmd_flags |= BIT_6;
++
++ qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
++}
++EXPORT_SYMBOL(qlt_abort_cmd);
++
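
/*
 * Annotation (not part of the patch): this exported helper takes over the
 * cmd->aborted handling that the patch removes from qlt_pre_xmit_response()
 * in an earlier hunk; the fabric side now reaches it through
 * tcm_qla2xxx_aborted_task() (see the tcm_qla2xxx.c hunk below) instead of
 * unmapping the scatterlist there by hand.
 */
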
+ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
+ {
+ struct qla_tgt_sess *sess = cmd->sess;
+@@ -3265,6 +3560,13 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
+ if (tgt->tgt_stop)
+ goto out_term;
+
++ if (cmd->state == QLA_TGT_STATE_ABORTED) {
++ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
++ "cmd with tag %u is aborted\n",
++ cmd->atio.u.isp24.exchange_addr);
++ goto out_term;
++ }
++
+ cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
+ cmd->tag = atio->u.isp24.exchange_addr;
+ cmd->unpacked_lun = scsilun_to_int(
+@@ -3318,6 +3620,12 @@ out_term:
+ static void qlt_do_work(struct work_struct *work)
+ {
+ struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
++ scsi_qla_host_t *vha = cmd->vha;
++ unsigned long flags;
++
++ spin_lock_irqsave(&vha->cmd_list_lock, flags);
++ list_del(&cmd->cmd_list);
++ spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+
+ __qlt_do_work(cmd);
+ }
+@@ -3364,14 +3672,25 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
+ unsigned long flags;
+ uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
+
++ spin_lock_irqsave(&vha->cmd_list_lock, flags);
++ list_del(&op->cmd_list);
++ spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
++
++ if (op->aborted) {
++ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083,
++ "sess_op with tag %u is aborted\n",
++ op->atio.u.isp24.exchange_addr);
++ goto out_term;
++ }
++
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
+- "qla_target(%d): Unable to find wwn login"
+- " (s_id %x:%x:%x), trying to create it manually\n",
+- vha->vp_idx, s_id[0], s_id[1], s_id[2]);
++ "qla_target(%d): Unable to find wwn login"
++ " (s_id %x:%x:%x), trying to create it manually\n",
++ vha->vp_idx, s_id[0], s_id[1], s_id[2]);
+
+ if (op->atio.u.raw.entry_count > 1) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
+- "Dropping multy entry atio %p\n", &op->atio);
++ "Dropping multi entry atio %p\n", &op->atio);
+ goto out_term;
+ }
+
+@@ -3436,10 +3755,25 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
+
+ memcpy(&op->atio, atio, sizeof(*atio));
+ op->vha = vha;
++
++ spin_lock(&vha->cmd_list_lock);
++ list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list);
++ spin_unlock(&vha->cmd_list_lock);
++
+ INIT_WORK(&op->work, qlt_create_sess_from_atio);
+ queue_work(qla_tgt_wq, &op->work);
+ return 0;
+ }
++
++ /* Another WWN used to have our s_id. Our PLOGI scheduled its
++ * session deletion, but it's still in sess_del_work wq */
++ if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
++ ql_dbg(ql_dbg_io, vha, 0x3061,
++ "New command while old session %p is being deleted\n",
++ sess);
++ return -EFAULT;
++ }
++
+ /*
+ * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
+ */
+@@ -3460,6 +3794,11 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
+
+ cmd->cmd_in_wq = 1;
+ cmd->cmd_flags |= BIT_0;
++
++ spin_lock(&vha->cmd_list_lock);
++ list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
++ spin_unlock(&vha->cmd_list_lock);
++
+ INIT_WORK(&cmd->work, qlt_do_work);
+ queue_work(qla_tgt_wq, &cmd->work);
+ return 0;
+@@ -3473,6 +3812,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
+ struct scsi_qla_host *vha = sess->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_mgmt_cmd *mcmd;
++ struct atio_from_isp *a = (struct atio_from_isp *)iocb;
+ int res;
+ uint8_t tmr_func;
+
+@@ -3513,6 +3853,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
+ "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
+ tmr_func = TMR_LUN_RESET;
++ abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
+ break;
+
+ case QLA_TGT_CLEAR_TS:
+@@ -3601,6 +3942,9 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
+ sizeof(struct atio_from_isp));
+ }
+
++ if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)
++ return -EFAULT;
++
+ return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
+ }
+
+@@ -3666,22 +4010,280 @@ static int qlt_abort_task(struct scsi_qla_host *vha,
+ return __qlt_abort_task(vha, iocb, sess);
+ }
+
++void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
++{
++ if (fcport->tgt_session) {
++ if (rc != MBS_COMMAND_COMPLETE) {
++ ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
++ "%s: se_sess %p / sess %p from"
++ " port %8phC loop_id %#04x s_id %02x:%02x:%02x"
++ " LOGO failed: %#x\n",
++ __func__,
++ fcport->tgt_session->se_sess,
++ fcport->tgt_session,
++ fcport->port_name, fcport->loop_id,
++ fcport->d_id.b.domain, fcport->d_id.b.area,
++ fcport->d_id.b.al_pa, rc);
++ }
++
++ fcport->tgt_session->logout_completed = 1;
++ }
++}
++
++static void qlt_swap_imm_ntfy_iocb(struct imm_ntfy_from_isp *a,
++ struct imm_ntfy_from_isp *b)
++{
++ struct imm_ntfy_from_isp tmp;
++ memcpy(&tmp, a, sizeof(struct imm_ntfy_from_isp));
++ memcpy(a, b, sizeof(struct imm_ntfy_from_isp));
++ memcpy(b, &tmp, sizeof(struct imm_ntfy_from_isp));
++}
++
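
/*
 * Annotation (not part of the patch): the swap serves the double-PLOGI path
 * below. After swapping, 'iocb' holds the *old* PLOGI, which is terminated
 * without an ack, while sess->tm_iocb keeps the *new* one, acked once
 * session deletion completes.
 */
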
++/*
++ * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
++ *
++ * Schedules sessions with matching port_id/loop_id but different wwn for
++ * deletion. Returns the existing session with matching wwn if present,
++ * NULL otherwise.
++ */
++static struct qla_tgt_sess *
++qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
++ port_id_t port_id, uint16_t loop_id)
++{
++ struct qla_tgt_sess *sess = NULL, *other_sess;
++ uint64_t other_wwn;
++
++ list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) {
++
++ other_wwn = wwn_to_u64(other_sess->port_name);
++
++ if (wwn == other_wwn) {
++ WARN_ON(sess);
++ sess = other_sess;
++ continue;
++ }
++
++ /* find other sess with nport_id collision */
++ if (port_id.b24 == other_sess->s_id.b24) {
++ if (loop_id != other_sess->loop_id) {
++ ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000c,
++ "Invalidating sess %p loop_id %d wwn %llx.\n",
++ other_sess, other_sess->loop_id, other_wwn);
++
++ /*
++ * logout_on_delete is set by default, but another
++ * session that has the same s_id/loop_id combo
++ * might have cleared it when it requested this
++ * session's deletion, so don't touch it
++ */
++ qlt_schedule_sess_for_deletion(other_sess, true);
++ } else {
++ /*
++ * Another wwn used to have our s_id/loop_id
++ * combo - kill the session, but don't log out
++ */
++ sess->logout_on_delete = 0;
++ qlt_schedule_sess_for_deletion(other_sess,
++ true);
++ }
++ continue;
++ }
++
++ /* find other sess with nport handle collision */
++ if (loop_id == other_sess->loop_id) {
++ ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000d,
++ "Invalidating sess %p loop_id %d wwn %llx.\n",
++ other_sess, other_sess->loop_id, other_wwn);
++
++ /* Same loop_id but different s_id
++ * Ok to kill and logout */
++ qlt_schedule_sess_for_deletion(other_sess, true);
++ }
++ }
++
++ return sess;
++}
++
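
/*
 * Annotation (not part of the patch): a summary of the collision handling
 * above, per list entry:
 *
 *   same wwn                      -> remember and return this session
 *   same s_id, different loop_id  -> schedule deletion, logout allowed
 *   same s_id, same loop_id       -> schedule deletion with logout
 *                                    suppressed, so the shared nport
 *                                    handle is not freed
 *   same loop_id, different s_id  -> schedule deletion, logout allowed
 */
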
++/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
++static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
++{
++ struct qla_tgt_sess_op *op;
++ struct qla_tgt_cmd *cmd;
++ uint32_t key;
++ int count = 0;
++
++ key = (((u32)s_id->b.domain << 16) |
++ ((u32)s_id->b.area << 8) |
++ ((u32)s_id->b.al_pa));
++
++ spin_lock(&vha->cmd_list_lock);
++ list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
++ uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
++ if (op_key == key) {
++ op->aborted = true;
++ count++;
++ }
++ }
++ list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
++ uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
++ if (cmd_key == key) {
++ cmd->state = QLA_TGT_STATE_ABORTED;
++ count++;
++ }
++ }
++ spin_unlock(&vha->cmd_list_lock);
++
++ return count;
++}
++
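
/*
 * Annotation (not part of the patch): the key built above is the same
 * 24-bit packing that sid_to_key() produces; it is open-coded here only
 * because this helper starts from a port_id_t rather than the raw 3-byte
 * s_id that sid_to_key() takes.
 */
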
+ /*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *iocb)
+ {
++ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
++ struct qla_hw_data *ha = vha->hw;
++ struct qla_tgt_sess *sess = NULL;
++ uint64_t wwn;
++ port_id_t port_id;
++ uint16_t loop_id;
++ uint16_t wd3_lo;
+ int res = 0;
+
++ wwn = wwn_to_u64(iocb->u.isp24.port_name);
++
++ port_id.b.domain = iocb->u.isp24.port_id[2];
++ port_id.b.area = iocb->u.isp24.port_id[1];
++ port_id.b.al_pa = iocb->u.isp24.port_id[0];
++ port_id.b.rsvd_1 = 0;
++
++ loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
++
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
+ "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
+ vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);
+
++ /* res = 1 means ack at the end of this handler
++ * res = 0 means ack async/later.
++ */
+ switch (iocb->u.isp24.status_subcode) {
+ case ELS_PLOGI:
+- case ELS_FLOGI:
++
++ /* Mark all stale commands in qla_tgt_wq for deletion */
++ abort_cmds_for_s_id(vha, &port_id);
++
++ if (wwn)
++ sess = qlt_find_sess_invalidate_other(tgt, wwn,
++ port_id, loop_id);
++
++ if (!sess || IS_SW_RESV_ADDR(sess->s_id)) {
++ res = 1;
++ break;
++ }
++
++ if (sess->plogi_ack_needed) {
++ /*
++ * Initiator sent another PLOGI before last PLOGI could
++ * finish. Swap plogi iocbs and terminate old one
++ * without acking, new one will get acked when session
++ * deletion completes.
++ */
++ ql_log(ql_log_warn, sess->vha, 0xf094,
++ "sess %p received double plogi.\n", sess);
++
++ qlt_swap_imm_ntfy_iocb(iocb, &sess->tm_iocb);
++
++ qlt_send_term_imm_notif(vha, iocb, 1);
++
++ res = 0;
++ break;
++ }
++
++ res = 0;
++
++ /*
++ * Save immediate Notif IOCB for Ack when sess is done
++ * and being deleted.
++ */
++ memcpy(&sess->tm_iocb, iocb, sizeof(sess->tm_iocb));
++ sess->plogi_ack_needed = 1;
++
++ /*
++ * Under normal circumstances we want to release nport handle
++ * during LOGO process to avoid nport handle leaks inside FW.
++ * The exception is when LOGO is done while another PLOGI with
++ * the same nport handle is waiting as might be the case here.
++ * Note: there is always a possibility of a race where session
++ * deletion has already started for other reasons (e.g. ACL
++ * removal) and now PLOGI arrives:
++ * 1. if PLOGI arrived in FW after nport handle has been freed,
++ * FW must have assigned this PLOGI a new/same handle and we
++ * can proceed ACK'ing it as usual when session deletion
++ * completes.
++ * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
++ * bit reached it, the handle has now been released. We'll
++ * get an error when we ACK this PLOGI. Nothing will be sent
++ * back to initiator. Initiator should eventually retry
++ * PLOGI and situation will correct itself.
++ */
++ sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
++ (sess->s_id.b24 == port_id.b24));
++ qlt_schedule_sess_for_deletion(sess, true);
++ break;
++
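
/*
 * Annotation (not part of the patch): in short, the nport handle survives
 * the LOGO only when the waiting PLOGI reuses exactly the same loop_id and
 * s_id; on any mismatch the firmware may hand out a different handle, so
 * the old one is released as usual.
 */
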
+ case ELS_PRLI:
++ wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
++
++ if (wwn)
++ sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id,
++ loop_id);
++
++ if (sess != NULL) {
++ if (sess->deleted) {
++ /*
++ * Impatient initiator sent PRLI before last
++ * PLOGI could finish. Force it to retry,
++ * while the last one finishes.
++ */
++ ql_log(ql_log_warn, sess->vha, 0xf095,
++ "sess %p PRLI received, before plogi ack.\n",
++ sess);
++ qlt_send_term_imm_notif(vha, iocb, 1);
++ res = 0;
++ break;
++ }
++
++ /*
++ * This shouldn't happen under normal circumstances,
++ * since we have deleted the old session during PLOGI
++ */
++ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
++ "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
++ sess->loop_id, sess, iocb->u.isp24.nport_handle);
++
++ sess->local = 0;
++ sess->loop_id = loop_id;
++ sess->s_id = port_id;
++
++ if (wd3_lo & BIT_7)
++ sess->conf_compl_supported = 1;
++
++ }
++ res = 1; /* send notify ack */
++
++ /* Make session global (not used in fabric mode) */
++ if (ha->current_topology != ISP_CFG_F) {
++ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
++ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
++ qla2xxx_wake_dpc(vha);
++ } else {
++ /* todo: else - create sess here. */
++ res = 1; /* send notify ack */
++ }
++
++ break;
++
+ case ELS_LOGO:
+ case ELS_PRLO:
+ res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
+@@ -3699,6 +4301,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
+ break;
+ }
+
++ case ELS_FLOGI: /* should never happen */
+ default:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
+ "qla_target(%d): Unsupported ELS command %x "
+@@ -5016,6 +5619,11 @@ static void qlt_abort_work(struct qla_tgt *tgt,
+ if (!sess)
+ goto out_term;
+ } else {
++ if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
++ sess = NULL;
++ goto out_term;
++ }
++
+ kref_get(&sess->se_sess->sess_kref);
+ }
+
+@@ -5070,6 +5678,11 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
+ if (!sess)
+ goto out_term;
+ } else {
++ if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
++ sess = NULL;
++ goto out_term;
++ }
++
+ kref_get(&sess->se_sess->sess_kref);
+ }
+
+diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
+index 332086776dfe..d30c60a1d522 100644
+--- a/drivers/scsi/qla2xxx/qla_target.h
++++ b/drivers/scsi/qla2xxx/qla_target.h
+@@ -167,7 +167,24 @@ struct imm_ntfy_from_isp {
+ uint32_t srr_rel_offs;
+ uint16_t srr_ui;
+ uint16_t srr_ox_id;
+- uint8_t reserved_4[19];
++ union {
++ struct {
++ uint8_t node_name[8];
++ } plogi; /* PLOGI/ADISC/PDISC */
++ struct {
++ /* PRLI word 3 bit 0-15 */
++ uint16_t wd3_lo;
++ uint8_t resv0[6];
++ } prli;
++ struct {
++ uint8_t port_id[3];
++ uint8_t resv1;
++ uint16_t nport_handle;
++ uint16_t resv2;
++ } req_els;
++ } u;
++ uint8_t port_name[8];
++ uint8_t resv3[3];
+ uint8_t vp_index;
+ uint32_t reserved_5;
+ uint8_t port_id[3];
+@@ -234,6 +251,7 @@ struct nack_to_isp {
+ uint8_t reserved[2];
+ uint16_t ox_id;
+ } __packed;
++#define NOTIFY_ACK_FLAGS_TERMINATE BIT_3
+ #define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0
+ #define NOTIFY_ACK_SRR_FLAGS_REJECT 1
+
+@@ -790,13 +808,6 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
+ #define FC_TM_REJECT 4
+ #define FC_TM_FAILED 5
+
+-/*
+- * Error code of qlt_pre_xmit_response() meaning that cmd's exchange was
+- * terminated, so no more actions is needed and success should be returned
+- * to target.
+- */
+-#define QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED 0x1717
+-
+ #if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
+ #define pci_dma_lo32(a) (a & 0xffffffff)
+ #define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff)
+@@ -874,6 +885,15 @@ struct qla_tgt_sess_op {
+ struct scsi_qla_host *vha;
+ struct atio_from_isp atio;
+ struct work_struct work;
++ struct list_head cmd_list;
++ bool aborted;
++};
++
++enum qla_sess_deletion {
++ QLA_SESS_DELETION_NONE = 0,
++ QLA_SESS_DELETION_PENDING = 1, /* hopefully we can get rid of
++ * this one */
++ QLA_SESS_DELETION_IN_PROGRESS = 2,
+ };
+
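
/*
 * Annotation (not part of the patch): the transitions qla_target.c
 * implements for this enum:
 *
 *   NONE    --schedule(immediate=false)--> PENDING
 *   PENDING --qlt_undelete_sess()--------> NONE
 *   PENDING --timer expiry or upgrade----> IN_PROGRESS
 *   NONE    --schedule(immediate=true)---> IN_PROGRESS
 *
 * IN_PROGRESS is terminal: qlt_create_sess(), qlt_fc_port_added() and the
 * I/O submission paths all refuse to resurrect a session in that state.
 */
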
+ /*
+@@ -884,8 +904,15 @@ struct qla_tgt_sess {
+ port_id_t s_id;
+
+ unsigned int conf_compl_supported:1;
+- unsigned int deleted:1;
++ unsigned int deleted:2;
+ unsigned int local:1;
++ unsigned int logout_on_delete:1;
++ unsigned int plogi_ack_needed:1;
++ unsigned int keep_nport_handle:1;
++
++ unsigned char logout_completed;
++
++ int generation;
+
+ struct se_session *se_sess;
+ struct scsi_qla_host *vha;
+@@ -897,6 +924,10 @@ struct qla_tgt_sess {
+
+ uint8_t port_name[WWN_SIZE];
+ struct work_struct free_work;
++
++ union {
++ struct imm_ntfy_from_isp tm_iocb;
++ };
+ };
+
+ struct qla_tgt_cmd {
+@@ -912,7 +943,6 @@ struct qla_tgt_cmd {
+ unsigned int conf_compl_supported:1;
+ unsigned int sg_mapped:1;
+ unsigned int free_sg:1;
+- unsigned int aborted:1; /* Needed in case of SRR */
+ unsigned int write_data_transferred:1;
+ unsigned int ctx_dsd_alloced:1;
+ unsigned int q_full:1;
+@@ -1027,6 +1057,10 @@ struct qla_tgt_srr_ctio {
+ struct qla_tgt_cmd *cmd;
+ };
+
++/* Check for Switch reserved address */
++#define IS_SW_RESV_ADDR(_s_id) \
++ ((_s_id.b.domain == 0xff) && (_s_id.b.area == 0xfc))
++
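
/*
 * Annotation (not part of the patch): 0xFFFCxx is the fabric-reserved
 * (domain controller) well-known address range, so an s_id such as
 * ff:fc:01 makes IS_SW_RESV_ADDR() true and the PLOGI handler treats the
 * session as absent.
 */
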
+ #define QLA_TGT_XMIT_DATA 1
+ #define QLA_TGT_XMIT_STATUS 2
+ #define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA)
+@@ -1044,7 +1078,7 @@ extern int qlt_lport_register(void *, u64, u64, u64,
+ extern void qlt_lport_deregister(struct scsi_qla_host *);
+ extern void qlt_unreg_sess(struct qla_tgt_sess *);
+ extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
+-extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *);
++extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int);
+ extern int __init qlt_init(void);
+ extern void qlt_exit(void);
+ extern void qlt_update_vp_map(struct scsi_qla_host *, int);
+@@ -1074,12 +1108,23 @@ static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha)
+ ha->host->active_mode |= MODE_INITIATOR;
+ }
+
++static inline uint32_t sid_to_key(const uint8_t *s_id)
++{
++ uint32_t key;
++
++ key = (((unsigned long)s_id[0] << 16) |
++ ((unsigned long)s_id[1] << 8) |
++ (unsigned long)s_id[2]);
++ return key;
++}
++
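
/*
 * Annotation (not part of the patch): a self-contained userspace model of
 * the packing sid_to_key() performs; fc_sid_to_key() is a hypothetical
 * stand-in name:
 */
#include <stdint.h>
#include <assert.h>

static uint32_t fc_sid_to_key(const uint8_t *s_id)
{
	/* domain:area:al_pa, packed big-endian into the low 24 bits */
	return ((uint32_t)s_id[0] << 16) |
	       ((uint32_t)s_id[1] << 8) |
	       (uint32_t)s_id[2];
}

int main(void)
{
	const uint8_t s_id[3] = { 0x01, 0x02, 0x03 };	/* domain, area, al_pa */
	assert(fc_sid_to_key(s_id) == 0x010203);
	return 0;
}
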
+ /*
+ * Exported symbols from qla_target.c LLD logic used by qla2xxx code..
+ */
+ extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
+ extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
+ extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
++extern void qlt_abort_cmd(struct qla_tgt_cmd *);
+ extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
+ extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
+ extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
+@@ -1110,5 +1155,7 @@ extern void qlt_stop_phase2(struct qla_tgt *);
+ extern irqreturn_t qla83xx_msix_atio_q(int, void *);
+ extern void qlt_83xx_iospace_config(struct qla_hw_data *);
+ extern int qlt_free_qfull_cmds(struct scsi_qla_host *);
++extern void qlt_logo_completion_handler(fc_port_t *, int);
++extern void qlt_do_generation_tick(struct scsi_qla_host *, int *);
+
+ #endif /* __QLA_TARGET_H */
+diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
+index a8c0c7362e48..de46cb370bdc 100644
+--- a/drivers/scsi/qla2xxx/qla_tmpl.c
++++ b/drivers/scsi/qla2xxx/qla_tmpl.c
+@@ -393,6 +393,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
+ if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
+ for (i = 0; i < vha->hw->max_req_queues; i++) {
+ struct req_que *req = vha->hw->req_q_map[i];
++
++ if (!test_bit(i, vha->hw->req_qid_map))
++ continue;
++
+ if (req || !buf) {
+ length = req ?
+ req->length : REQUEST_ENTRY_CNT_24XX;
+@@ -406,6 +410,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
+ } else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
+ for (i = 0; i < vha->hw->max_rsp_queues; i++) {
+ struct rsp_que *rsp = vha->hw->rsp_q_map[i];
++
++ if (!test_bit(i, vha->hw->rsp_qid_map))
++ continue;
++
+ if (rsp || !buf) {
+ length = rsp ?
+ rsp->length : RESPONSE_ENTRY_CNT_MQ;
+@@ -632,6 +640,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
+ if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
+ for (i = 0; i < vha->hw->max_req_queues; i++) {
+ struct req_que *req = vha->hw->req_q_map[i];
++
++ if (!test_bit(i, vha->hw->req_qid_map))
++ continue;
++
+ if (req || !buf) {
+ qla27xx_insert16(i, buf, len);
+ qla27xx_insert16(1, buf, len);
+@@ -643,6 +655,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
+ } else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
+ for (i = 0; i < vha->hw->max_rsp_queues; i++) {
+ struct rsp_que *rsp = vha->hw->rsp_q_map[i];
++
++ if (!test_bit(i, vha->hw->rsp_qid_map))
++ continue;
++
+ if (rsp || !buf) {
+ qla27xx_insert16(i, buf, len);
+ qla27xx_insert16(1, buf, len);
+diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+index 272a2646a759..c763ca597a83 100644
+--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
++++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+@@ -422,7 +422,7 @@ static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
+ cmd->cmd_flags |= BIT_14;
+ }
+
+- return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
++ return target_put_sess_cmd(se_cmd);
+ }
+
+ /* tcm_qla2xxx_release_cmd - Callback from TCM Core to release underlying
+@@ -662,7 +662,6 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
+ cmd->cmd_flags |= BIT_4;
+ cmd->bufflen = se_cmd->data_length;
+ cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
+- cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
+
+ cmd->sg_cnt = se_cmd->t_data_nents;
+ cmd->sg = se_cmd->t_data_sg;
+@@ -692,7 +691,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
+ cmd->sg_cnt = 0;
+ cmd->offset = 0;
+ cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
+- cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
+ if (cmd->cmd_flags & BIT_5) {
+ pr_crit("Bit_5 already set for cmd = %p.\n", cmd);
+ dump_stack();
+@@ -757,14 +755,7 @@ static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
+ {
+ struct qla_tgt_cmd *cmd = container_of(se_cmd,
+ struct qla_tgt_cmd, se_cmd);
+- struct scsi_qla_host *vha = cmd->vha;
+- struct qla_hw_data *ha = vha->hw;
+-
+- if (!cmd->sg_mapped)
+- return;
+-
+- pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
+- cmd->sg_mapped = 0;
++ qlt_abort_cmd(cmd);
+ }
+
+ static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
+@@ -1273,9 +1264,7 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
+ return NULL;
+ }
+
+- key = (((unsigned long)s_id[0] << 16) |
+- ((unsigned long)s_id[1] << 8) |
+- (unsigned long)s_id[2]);
++ key = sid_to_key(s_id);
+ pr_debug("find_sess_by_s_id: 0x%06x\n", key);
+
+ se_nacl = btree_lookup32(&lport->lport_fcport_map, key);
+@@ -1310,9 +1299,7 @@ static void tcm_qla2xxx_set_sess_by_s_id(
+ void *slot;
+ int rc;
+
+- key = (((unsigned long)s_id[0] << 16) |
+- ((unsigned long)s_id[1] << 8) |
+- (unsigned long)s_id[2]);
++ key = sid_to_key(s_id);
+ pr_debug("set_sess_by_s_id: %06x\n", key);
+
+ slot = btree_lookup32(&lport->lport_fcport_map, key);
+@@ -1670,6 +1657,10 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
+ }
+
+ sess->conf_compl_supported = conf_compl_supported;
++
++ /* Reset logout parameters to default */
++ sess->logout_on_delete = 1;
++ sess->keep_nport_handle = 0;
+ }
+
+ /*
+diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
+index 9f77d23239a2..6e2256f7d7d6 100644
+--- a/drivers/scsi/scsi_devinfo.c
++++ b/drivers/scsi/scsi_devinfo.c
+@@ -205,6 +205,7 @@ static struct {
+ {"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC},
+ {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
+ {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN},
++ {"Marvell", "Console", NULL, BLIST_SKIP_VPD_PAGES},
+ {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
+ {"MATSHITA", "DMC-LC5", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
+ {"MATSHITA", "DMC-LC40", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 062633295bc2..958c732c428e 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -704,7 +704,7 @@ static int iscsit_add_reject_from_cmd(
+ */
+ if (cmd->se_cmd.se_tfo != NULL) {
+ pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
+- target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
++ target_put_sess_cmd(&cmd->se_cmd);
+ }
+ return -1;
+ }
+@@ -994,7 +994,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
+ conn->cid);
+
+- target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true);
++ target_get_sess_cmd(&cmd->se_cmd, true);
+
+ cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
+ scsilun_to_int(&hdr->lun));
+@@ -1060,7 +1060,7 @@ int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+ return -1;
+ else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+- target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
++ target_put_sess_cmd(&cmd->se_cmd);
+ return 0;
+ }
+ }
+@@ -1076,7 +1076,7 @@ int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ if (!cmd->sense_reason)
+ return 0;
+
+- target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
++ target_put_sess_cmd(&cmd->se_cmd);
+ return 0;
+ }
+
+@@ -1107,7 +1107,6 @@ static int
+ iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
+ bool dump_payload)
+ {
+- struct iscsi_conn *conn = cmd->conn;
+ int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
+ /*
+ * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
+@@ -1134,7 +1133,7 @@ after_immediate_data:
+
+ rc = iscsit_dump_data_payload(cmd->conn,
+ cmd->first_burst_len, 1);
+- target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
++ target_put_sess_cmd(&cmd->se_cmd);
+ return rc;
+ } else if (cmd->unsolicited_data)
+ iscsit_set_unsoliticed_dataout(cmd);
+@@ -1804,7 +1803,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ conn->sess->se_sess, 0, DMA_NONE,
+ MSG_SIMPLE_TAG, cmd->sense_buffer + 2);
+
+- target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true);
++ target_get_sess_cmd(&cmd->se_cmd, true);
+ sess_ref = true;
+
+ switch (function) {
+@@ -1946,7 +1945,7 @@ attach:
+ */
+ if (sess_ref) {
+ pr_debug("Handle TMR, using sess_ref=true check\n");
+- target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
++ target_put_sess_cmd(&cmd->se_cmd);
+ }
+
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
+index 80a1a4fbede0..bf8c6e446b68 100644
+--- a/drivers/target/iscsi/iscsi_target_configfs.c
++++ b/drivers/target/iscsi/iscsi_target_configfs.c
+@@ -1961,7 +1961,7 @@ static void lio_set_default_node_attributes(struct se_node_acl *se_acl)
+
+ static int lio_check_stop_free(struct se_cmd *se_cmd)
+ {
+- return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
++ return target_put_sess_cmd(se_cmd);
+ }
+
+ static void lio_release_cmd(struct se_cmd *se_cmd)
+diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
+index 0b68c2ebce95..9062bdaf26ae 100644
+--- a/drivers/target/iscsi/iscsi_target_util.c
++++ b/drivers/target/iscsi/iscsi_target_util.c
+@@ -746,7 +746,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
+ rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
+ if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
+ __iscsit_free_cmd(cmd, true, shutdown);
+- target_put_sess_cmd(se_cmd->se_sess, se_cmd);
++ target_put_sess_cmd(se_cmd);
+ }
+ break;
+ case ISCSI_OP_REJECT:
+@@ -762,7 +762,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
+ rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
+ if (!rc && shutdown && se_cmd->se_sess) {
+ __iscsit_free_cmd(cmd, true, shutdown);
+- target_put_sess_cmd(se_cmd->se_sess, se_cmd);
++ target_put_sess_cmd(se_cmd);
+ }
+ break;
+ }
+diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
+index fa5e157db47b..383074723fde 100644
+--- a/drivers/target/target_core_tmr.c
++++ b/drivers/target/target_core_tmr.c
+@@ -71,7 +71,7 @@ void core_tmr_release_req(struct se_tmr_req *tmr)
+
+ if (dev) {
+ spin_lock_irqsave(&dev->se_tmr_lock, flags);
+- list_del(&tmr->tmr_list);
++ list_del_init(&tmr->tmr_list);
+ spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
+ }
+
+@@ -153,7 +153,7 @@ void core_tmr_abort_task(
+ cancel_work_sync(&se_cmd->work);
+ transport_wait_for_tasks(se_cmd);
+
+- target_put_sess_cmd(se_sess, se_cmd);
++ target_put_sess_cmd(se_cmd);
+ transport_cmd_finish_abort(se_cmd, true);
+
+ printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
+@@ -175,9 +175,11 @@ static void core_tmr_drain_tmr_list(
+ struct list_head *preempt_and_abort_list)
+ {
+ LIST_HEAD(drain_tmr_list);
++ struct se_session *sess;
+ struct se_tmr_req *tmr_p, *tmr_pp;
+ struct se_cmd *cmd;
+ unsigned long flags;
++ bool rc;
+ /*
+ * Release all pending and outgoing TMRs aside from the received
+ * LUN_RESET tmr..
+@@ -203,17 +205,31 @@ static void core_tmr_drain_tmr_list(
+ if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
+ continue;
+
++ sess = cmd->se_sess;
++ if (WARN_ON_ONCE(!sess))
++ continue;
++
++ spin_lock(&sess->sess_cmd_lock);
+ spin_lock(&cmd->t_state_lock);
+ if (!(cmd->transport_state & CMD_T_ACTIVE)) {
+ spin_unlock(&cmd->t_state_lock);
++ spin_unlock(&sess->sess_cmd_lock);
+ continue;
+ }
+ if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
+ spin_unlock(&cmd->t_state_lock);
++ spin_unlock(&sess->sess_cmd_lock);
+ continue;
+ }
++ cmd->transport_state |= CMD_T_ABORTED;
+ spin_unlock(&cmd->t_state_lock);
+
++ rc = kref_get_unless_zero(&cmd->cmd_kref);
++ spin_unlock(&sess->sess_cmd_lock);
++ if (!rc) {
++ printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n");
++ continue;
++ }
+ list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
+ }
+ spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
+@@ -227,7 +243,11 @@ static void core_tmr_drain_tmr_list(
+ (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
+ tmr_p->function, tmr_p->response, cmd->t_state);
+
++ cancel_work_sync(&cmd->work);
++ transport_wait_for_tasks(cmd);
++
+ transport_cmd_finish_abort(cmd, 1);
++ target_put_sess_cmd(cmd);
+ }
+ }
+
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index e786e9104c41..adf96b78e9f0 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -1379,7 +1379,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
+ * for fabrics using TARGET_SCF_ACK_KREF that expect a second
+ * kref_put() to happen during fabric packet acknowledgement.
+ */
+- ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
++ ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
+ if (ret)
+ return ret;
+ /*
+@@ -1393,7 +1393,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
+ rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
+ if (rc) {
+ transport_send_check_condition_and_sense(se_cmd, rc, 0);
+- target_put_sess_cmd(se_sess, se_cmd);
++ target_put_sess_cmd(se_cmd);
+ return 0;
+ }
+
+@@ -1544,7 +1544,7 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
+ se_cmd->se_tmr_req->ref_task_tag = tag;
+
+ /* See target_submit_cmd for commentary */
+- ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
++ ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
+ if (ret) {
+ core_tmr_release_req(se_cmd->se_tmr_req);
+ return ret;
+@@ -2146,7 +2146,7 @@ static int transport_release_cmd(struct se_cmd *cmd)
+ * If this cmd has been setup with target_get_sess_cmd(), drop
+ * the kref and call ->release_cmd() in kref callback.
+ */
+- return target_put_sess_cmd(cmd->se_sess, cmd);
++ return target_put_sess_cmd(cmd);
+ }
+
+ /**
+@@ -2390,13 +2390,12 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
+ EXPORT_SYMBOL(transport_generic_free_cmd);
+
+ /* target_get_sess_cmd - Add command to active ->sess_cmd_list
+- * @se_sess: session to reference
+ * @se_cmd: command descriptor to add
+ * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd()
+ */
+-int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
+- bool ack_kref)
++int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
+ {
++ struct se_session *se_sess = se_cmd->se_sess;
+ unsigned long flags;
+ int ret = 0;
+
+@@ -2420,7 +2419,7 @@ out:
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+
+ if (ret && ack_kref)
+- target_put_sess_cmd(se_sess, se_cmd);
++ target_put_sess_cmd(se_cmd);
+
+ return ret;
+ }
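
/*
 * Annotation (not part of the patch): the calling-convention change in this
 * hunk (and the matching put side just below) is what every fabric-driver
 * hunk in this patch follows up on:
 *
 *	before:	target_get_sess_cmd(se_sess, se_cmd, ack_kref);
 *		target_put_sess_cmd(se_sess, se_cmd);
 *
 *	after:	target_get_sess_cmd(se_cmd, ack_kref);
 *		target_put_sess_cmd(se_cmd);
 *
 * with the session now derived from se_cmd->se_sess.
 */
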
+@@ -2448,11 +2447,12 @@ static void target_release_cmd_kref(struct kref *kref)
+ }
+
+ /* target_put_sess_cmd - Check for active I/O shutdown via kref_put
+- * @se_sess: session to reference
+ * @se_cmd: command descriptor to drop
+ */
+-int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
++int target_put_sess_cmd(struct se_cmd *se_cmd)
+ {
++ struct se_session *se_sess = se_cmd->se_sess;
++
+ if (!se_sess) {
+ se_cmd->se_tfo->release_cmd(se_cmd);
+ return 1;
+@@ -2945,8 +2945,17 @@ static void target_tmr_work(struct work_struct *work)
+ struct se_cmd *cmd = container_of(work, struct se_cmd, work);
+ struct se_device *dev = cmd->se_dev;
+ struct se_tmr_req *tmr = cmd->se_tmr_req;
++ unsigned long flags;
+ int ret;
+
++ spin_lock_irqsave(&cmd->t_state_lock, flags);
++ if (cmd->transport_state & CMD_T_ABORTED) {
++ tmr->response = TMR_FUNCTION_REJECTED;
++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++ goto check_stop;
++ }
++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++
+ switch (tmr->function) {
+ case TMR_ABORT_TASK:
+ core_tmr_abort_task(dev, tmr, cmd->se_sess);
+@@ -2974,9 +2983,17 @@ static void target_tmr_work(struct work_struct *work)
+ break;
+ }
+
++ spin_lock_irqsave(&cmd->t_state_lock, flags);
++ if (cmd->transport_state & CMD_T_ABORTED) {
++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++ goto check_stop;
++ }
+ cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++
+ cmd->se_tfo->queue_tm_rsp(cmd);
+
++check_stop:
+ transport_cmd_check_stop_to_fabric(cmd);
+ }
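
/*
 * Annotation (not part of the patch): CMD_T_ABORTED is now checked twice
 * under t_state_lock, once before the TMR runs and once before
 * queue_tm_rsp(), so a TMR aborted by core_tmr_drain_tmr_list() (see the
 * target_core_tmr.c hunk above) never sends a response to the initiator.
 */
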
+
+diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
+index 082304dcbe8c..5222805bfb15 100644
+--- a/drivers/tty/pty.c
++++ b/drivers/tty/pty.c
+@@ -655,7 +655,14 @@ static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
+ /* this is called once with whichever end is closed last */
+ static void pty_unix98_shutdown(struct tty_struct *tty)
+ {
+- devpts_kill_index(tty->driver_data, tty->index);
++ struct inode *ptmx_inode;
++
++ if (tty->driver->subtype == PTY_TYPE_MASTER)
++ ptmx_inode = tty->driver_data;
++ else
++ ptmx_inode = tty->link->driver_data;
++ devpts_kill_index(ptmx_inode, tty->index);
++ devpts_del_ref(ptmx_inode);
+ }
+
+ static const struct tty_operations ptm_unix98_ops = {
+@@ -748,6 +755,18 @@ static int ptmx_open(struct inode *inode, struct file *filp)
+ set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
+ tty->driver_data = inode;
+
++ /*
++ * In the case where all references to ptmx inode are dropped and we
++ * still have /dev/tty opened pointing to the master/slave pair (ptmx
++ * is closed/released before /dev/tty), we must make sure that the inode
++ * is still valid when we call the final pty_unix98_shutdown, thus we
++ * hold an additional reference to the ptmx inode. For the same /dev/tty
++ * last close case, we also need to make sure the super_block isn't
++ * destroyed (devpts instance unmounted), before /dev/tty is closed and
++ * on its release devpts_kill_index is called.
++ */
++ devpts_add_ref(inode);
++
+ tty_add_file(tty, filp);
+
+ slave_inode = devpts_pty_new(inode,
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index 98daabab85cf..f6e5ef5bac5e 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -1765,6 +1765,16 @@ pci_wch_ch353_setup(struct serial_private *priv,
+ return pci_default_setup(priv, board, port, idx);
+ }
+
++static int
++pci_wch_ch38x_setup(struct serial_private *priv,
++ const struct pciserial_board *board,
++ struct uart_8250_port *port, int idx)
++{
++ port->port.flags |= UPF_FIXED_TYPE;
++ port->port.type = PORT_16850;
++ return pci_default_setup(priv, board, port, idx);
++}
++
+ #define PCI_VENDOR_ID_SBSMODULARIO 0x124B
+ #define PCI_SUBVENDOR_ID_SBSMODULARIO 0x124B
+ #define PCI_DEVICE_ID_OCTPRO 0x0001
+@@ -1819,6 +1829,10 @@ pci_wch_ch353_setup(struct serial_private *priv,
+ #define PCI_VENDOR_ID_SUNIX 0x1fd4
+ #define PCI_DEVICE_ID_SUNIX_1999 0x1999
+
++#define PCIE_VENDOR_ID_WCH 0x1c00
++#define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250
++#define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470
++#define PCIE_DEVICE_ID_WCH_CH382_2S 0x3253
+
+ #define PCI_DEVICE_ID_EXAR_XR17V4358 0x4358
+ #define PCI_DEVICE_ID_EXAR_XR17V8358 0x8358
+@@ -2530,6 +2544,30 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_wch_ch353_setup,
+ },
++ /* WCH CH382 2S card (16850 clone) */
++ {
++ .vendor = PCIE_VENDOR_ID_WCH,
++ .device = PCIE_DEVICE_ID_WCH_CH382_2S,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .setup = pci_wch_ch38x_setup,
++ },
++ /* WCH CH382 2S1P card (16850 clone) */
++ {
++ .vendor = PCIE_VENDOR_ID_WCH,
++ .device = PCIE_DEVICE_ID_WCH_CH382_2S1P,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .setup = pci_wch_ch38x_setup,
++ },
++ /* WCH CH384 4S card (16850 clone) */
++ {
++ .vendor = PCIE_VENDOR_ID_WCH,
++ .device = PCIE_DEVICE_ID_WCH_CH384_4S,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .setup = pci_wch_ch38x_setup,
++ },
+ /*
+ * ASIX devices with FIFO bug
+ */
+@@ -2828,6 +2866,8 @@ enum pci_board_num_t {
+ pbn_fintek_4,
+ pbn_fintek_8,
+ pbn_fintek_12,
++ pbn_wch382_2,
++ pbn_wch384_4,
+ };
+
+ /*
+@@ -3629,6 +3669,20 @@ static struct pciserial_board pci_boards[] = {
+ .base_baud = 115200,
+ .first_offset = 0x40,
+ },
++ [pbn_wch382_2] = {
++ .flags = FL_BASE0,
++ .num_ports = 2,
++ .base_baud = 115200,
++ .uart_offset = 8,
++ .first_offset = 0xC0,
++ },
++ [pbn_wch384_4] = {
++ .flags = FL_BASE0,
++ .num_ports = 4,
++ .base_baud = 115200,
++ .uart_offset = 8,
++ .first_offset = 0xC0,
++ },
+ };
+
+ static const struct pci_device_id blacklist[] = {
+@@ -3640,6 +3694,8 @@ static const struct pci_device_id blacklist[] = {
+ /* multi-io cards handled by parport_serial */
+ { PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */
+ { PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */
++ { PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */
++ { PCI_DEVICE(0x1c00, 0x3470), }, /* WCH CH384 4S */
+ };
+
+ /*
+@@ -5363,6 +5419,14 @@ static struct pci_device_id serial_pci_tbl[] = {
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0, pbn_b0_bt_2_115200 },
+
++ { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH382_2S,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0, pbn_wch382_2 },
++
++ { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0, pbn_wch384_4 },
++
+ /*
+ * Commtech, Inc. Fastcom adapters
+ */
+diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
+index 313f09a73624..be039663359c 100644
+--- a/drivers/vhost/scsi.c
++++ b/drivers/vhost/scsi.c
+@@ -595,7 +595,7 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *cmd)
+
+ static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
+ {
+- return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
++ return target_put_sess_cmd(se_cmd);
+ }
+
+ static void
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index cfd1e6f6ac64..69e596b1f95b 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -1371,7 +1371,8 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
+ read_extent_buffer(eb, dest + bytes_left,
+ name_off, name_len);
+ if (eb != eb_in) {
+- btrfs_tree_read_unlock_blocking(eb);
++ if (!path->skip_locking)
++ btrfs_tree_read_unlock_blocking(eb);
+ free_extent_buffer(eb);
+ }
+ ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
+@@ -1390,9 +1391,10 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
+ eb = path->nodes[0];
+ /* make sure we can use eb after releasing the path */
+ if (eb != eb_in) {
+- atomic_inc(&eb->refs);
+- btrfs_tree_read_lock(eb);
+- btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
++ if (!path->skip_locking)
++ btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
++ path->nodes[0] = NULL;
++ path->locks[0] = 0;
+ }
+ btrfs_release_path(path);
+ iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index de4e70fb3cbb..37848167c4b8 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -1689,7 +1689,7 @@ int btrfs_should_delete_dir_index(struct list_head *del_list,
+ *
+ */
+ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
+- struct list_head *ins_list)
++ struct list_head *ins_list, bool *emitted)
+ {
+ struct btrfs_dir_item *di;
+ struct btrfs_delayed_item *curr, *next;
+@@ -1733,6 +1733,7 @@ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
+
+ if (over)
+ return 1;
++ *emitted = true;
+ }
+ return 0;
+ }
+diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
+index f70119f25421..0167853c84ae 100644
+--- a/fs/btrfs/delayed-inode.h
++++ b/fs/btrfs/delayed-inode.h
+@@ -144,7 +144,7 @@ void btrfs_put_delayed_items(struct list_head *ins_list,
+ int btrfs_should_delete_dir_index(struct list_head *del_list,
+ u64 index);
+ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
+- struct list_head *ins_list);
++ struct list_head *ins_list, bool *emitted);
+
+ /* for init */
+ int __init btrfs_delayed_inode_init(void);
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 5db50e8bf52e..211f19aa56ba 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -5341,6 +5341,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
+ char *name_ptr;
+ int name_len;
+ int is_curr = 0; /* ctx->pos points to the current index? */
++ bool emitted;
+
+ /* FIXME, use a real flag for deciding about the key type */
+ if (root->fs_info->tree_root == root)
+@@ -5369,6 +5370,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
+ if (ret < 0)
+ goto err;
+
++ emitted = false;
+ while (1) {
+ leaf = path->nodes[0];
+ slot = path->slots[0];
+@@ -5448,6 +5450,7 @@ skip:
+
+ if (over)
+ goto nopos;
++ emitted = true;
+ di_len = btrfs_dir_name_len(leaf, di) +
+ btrfs_dir_data_len(leaf, di) + sizeof(*di);
+ di_cur += di_len;
+@@ -5460,11 +5463,20 @@ next:
+ if (key_type == BTRFS_DIR_INDEX_KEY) {
+ if (is_curr)
+ ctx->pos++;
+- ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
++ ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list, &emitted);
+ if (ret)
+ goto nopos;
+ }
+
++ /*
++ * If we haven't emitted any dir entry, we must not touch ctx->pos as
++ * it was set to the termination value in the previous call. We assume
++ * that "." and ".." were emitted if we reach this point and set the
++ * termination value as well for an empty directory.
++ */
++ if (ctx->pos > 2 && !emitted)
++ goto nopos;
++
+ /* Reached end of directory/root. Bump pos past the last item. */
+ ctx->pos++;
+
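
The guard above exists because ctx->pos had already been set to its termination value by a previous getdents() call; bumping it again on an empty pass moved the offset past end-of-directory. The userspace-visible contract is simply that an exhausted directory stream stays exhausted, sketched here for illustration (not a test case from this patch):

    #include <dirent.h>
    #include <stdio.h>

    int main(void)
    {
            DIR *d = opendir(".");
            struct dirent *e;

            if (!d)
                    return 1;
            while ((e = readdir(d)) != NULL)        /* drain the directory */
                    printf("%s\n", e->d_name);
            /* A second pass without rewinddir() must stay at EOF; the fix
             * keeps the kernel-side offset stable across such calls. */
            if (readdir(d) == NULL)
                    puts("EOF is stable");
            closedir(d);
            return 0;
    }
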
+diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
+index da7fbfaa60b4..cee9889a6612 100644
+--- a/fs/cifs/cifsencrypt.c
++++ b/fs/cifs/cifsencrypt.c
+@@ -710,7 +710,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
+
+ ses->auth_key.response = kmalloc(baselen + tilen, GFP_KERNEL);
+ if (!ses->auth_key.response) {
+- rc = ENOMEM;
++ rc = -ENOMEM;
+ ses->auth_key.len = 0;
+ goto setup_ntlmv2_rsp_ret;
+ }
+diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
+index cfe8466f7fef..f13aa00ed1d6 100644
+--- a/fs/devpts/inode.c
++++ b/fs/devpts/inode.c
+@@ -569,6 +569,26 @@ void devpts_kill_index(struct inode *ptmx_inode, int idx)
+ mutex_unlock(&allocated_ptys_lock);
+ }
+
++/*
++ * pty code needs to hold extra references in case of last /dev/tty close
++ */
++
++void devpts_add_ref(struct inode *ptmx_inode)
++{
++ struct super_block *sb = pts_sb_from_inode(ptmx_inode);
++
++ atomic_inc(&sb->s_active);
++ ihold(ptmx_inode);
++}
++
++void devpts_del_ref(struct inode *ptmx_inode)
++{
++ struct super_block *sb = pts_sb_from_inode(ptmx_inode);
++
++ iput(ptmx_inode);
++ deactivate_super(sb);
++}
++
+ /**
+ * devpts_pty_new -- create a new inode in /dev/pts/
+ * @ptmx_inode: inode of the master
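
devpts_add_ref() and devpts_del_ref() exist so a caller can pin both the devpts superblock and the ptmx inode across the lifetime of a pty. The intended pairing, sketched with hypothetical function names (the real call sites live in the pty driver, not in this hunk):

    /* Hypothetical caller: pin devpts for the slave's lifetime so the
     * final /dev/tty close cannot race with an unmount of /dev/pts. */
    static int example_pty_install(struct inode *ptmx_inode)
    {
            devpts_add_ref(ptmx_inode);   /* atomic_inc(&sb->s_active) + ihold() */
            /* ... create and register the slave ... */
            return 0;
    }

    static void example_pty_remove(struct inode *ptmx_inode)
    {
            /* ... tear the slave down ... */
            devpts_del_ref(ptmx_inode);   /* iput() + deactivate_super() */
    }
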
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 777f74370143..2ea75cbeb697 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -3033,29 +3033,29 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
+ * case, we allocate an io_end structure to hook to the iocb.
+ */
+ iocb->private = NULL;
+- ext4_inode_aio_set(inode, NULL);
+- if (!is_sync_kiocb(iocb)) {
+- io_end = ext4_init_io_end(inode, GFP_NOFS);
+- if (!io_end) {
+- ret = -ENOMEM;
+- goto retake_lock;
+- }
+- /*
+- * Grab reference for DIO. Will be dropped in ext4_end_io_dio()
+- */
+- iocb->private = ext4_get_io_end(io_end);
+- /*
+- * we save the io structure for current async direct
+- * IO, so that later ext4_map_blocks() could flag the
+- * io structure whether there is a unwritten extents
+- * needs to be converted when IO is completed.
+- */
+- ext4_inode_aio_set(inode, io_end);
+- }
+-
+ if (overwrite) {
+ get_block_func = ext4_get_block_write_nolock;
+ } else {
++ ext4_inode_aio_set(inode, NULL);
++ if (!is_sync_kiocb(iocb)) {
++ io_end = ext4_init_io_end(inode, GFP_NOFS);
++ if (!io_end) {
++ ret = -ENOMEM;
++ goto retake_lock;
++ }
++ /*
++ * Grab reference for DIO. Will be dropped in
++ * ext4_end_io_dio()
++ */
++ iocb->private = ext4_get_io_end(io_end);
++ /*
++ * we save the io structure for current async direct
++ * IO, so that later ext4_map_blocks() could flag the
++ * io structure whether there are unwritten extents that
++ * need to be converted when IO is completed.
++ */
++ ext4_inode_aio_set(inode, io_end);
++ }
+ get_block_func = ext4_get_block_write;
+ dio_flags = DIO_LOCKING;
+ }
+diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
+index 9f2311bc9c4f..165f309bafcc 100644
+--- a/fs/ext4/move_extent.c
++++ b/fs/ext4/move_extent.c
+@@ -269,10 +269,12 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
+ unsigned long blocksize = orig_inode->i_sb->s_blocksize;
+ unsigned int w_flags = 0;
+ unsigned int tmp_data_size, data_size, replaced_size;
+- int err2, jblocks, retries = 0;
++ int i, err2, jblocks, retries = 0;
+ int replaced_count = 0;
+ int from = data_offset_in_page << orig_inode->i_blkbits;
+ int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
++ struct super_block *sb = orig_inode->i_sb;
++ struct buffer_head *bh = NULL;
+
+ /*
+ * It needs twice the amount of ordinary journal buffers because
+@@ -386,8 +388,16 @@ data_copy:
+ }
+ /* Perform all necessary steps similar write_begin()/write_end()
+ * but keeping in mind that i_size will not change */
+- *err = __block_write_begin(pagep[0], from, replaced_size,
+- ext4_get_block);
++ if (!page_has_buffers(pagep[0]))
++ create_empty_buffers(pagep[0], 1 << orig_inode->i_blkbits, 0);
++ bh = page_buffers(pagep[0]);
++ for (i = 0; i < data_offset_in_page; i++)
++ bh = bh->b_this_page;
++ for (i = 0; i < block_len_in_page; i++) {
++ *err = ext4_get_block(orig_inode, orig_blk_offset + i, bh, 0);
++ if (*err < 0)
++ break;
++ }
+ if (!*err)
+ *err = block_commit_write(pagep[0], from, from + replaced_size);
+
+@@ -405,10 +415,13 @@ unlock_pages:
+ page_cache_release(pagep[1]);
+ stop_journal:
+ ext4_journal_stop(handle);
++ if (*err == -ENOSPC &&
++ ext4_should_retry_alloc(sb, &retries))
++ goto again;
+ /* Buffer was busy because probably is pinned to journal transaction,
+ * force transaction commit may help to free it. */
+- if (*err == -EBUSY && ext4_should_retry_alloc(orig_inode->i_sb,
+- &retries))
++ if (*err == -EBUSY && retries++ < 4 && EXT4_SB(sb)->s_journal &&
++ jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal))
+ goto again;
+ return replaced_count;
+
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index ca4588388fc3..77c81c64a47e 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -186,7 +186,7 @@ static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
+ if (flex_gd == NULL)
+ goto out3;
+
+- if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_flex_group_data))
++ if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data))
+ goto out2;
+ flex_gd->count = flexbg_size;
+
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 2d609a5fbfea..a07634599cd7 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -705,6 +705,21 @@ static long writeback_sb_inodes(struct super_block *sb,
+
+ work->nr_pages -= write_chunk - wbc.nr_to_write;
+ wrote += write_chunk - wbc.nr_to_write;
++
++ if (need_resched()) {
++ /*
++ * We're trying to balance between building up a nice
++ * long list of IOs to improve our merge rate, and
++ * getting those IOs out quickly for anyone throttling
++ * in balance_dirty_pages(). cond_resched() doesn't
++ * unplug, so get our IOs out the door before we
++ * give up the CPU.
++ */
++ blk_flush_plug(current);
++ cond_resched();
++ }
++
++
+ spin_lock(&wb->list_lock);
+ spin_lock(&inode->i_lock);
+ if (!(inode->i_state & I_DIRTY))
+@@ -712,7 +727,7 @@ static long writeback_sb_inodes(struct super_block *sb,
+ requeue_inode(inode, wb, &wbc);
+ inode_sync_complete(inode);
+ spin_unlock(&inode->i_lock);
+- cond_resched_lock(&wb->list_lock);
++
+ /*
+ * bail out to wb_writeback() often enough to check
+ * background threshold and other termination conditions.
+diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
+index 0419485891f2..0f1c6f315cdc 100644
+--- a/include/asm-generic/cputime_nsecs.h
++++ b/include/asm-generic/cputime_nsecs.h
+@@ -75,7 +75,7 @@ typedef u64 __nocast cputime64_t;
+ */
+ static inline cputime_t timespec_to_cputime(const struct timespec *val)
+ {
+- u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
++ u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
+ return (__force cputime_t) ret;
+ }
+ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
+@@ -91,7 +91,8 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
+ */
+ static inline cputime_t timeval_to_cputime(const struct timeval *val)
+ {
+- u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
++ u64 ret = (u64)val->tv_sec * NSEC_PER_SEC +
++ val->tv_usec * NSEC_PER_USEC;
+ return (__force cputime_t) ret;
+ }
+ static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
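
Both casts above matter only on 32-bit builds, where tv_sec is a 32-bit long and the multiplication would otherwise wrap before being widened. A freestanding demonstration, assuming an ILP32 target (on LP64 the two results agree):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* unsigned so the 32-bit wraparound is well-defined in the demo */
            unsigned long tv_sec = 5, nsec_per_sec = 1000000000UL;

            uint64_t wrong = tv_sec * nsec_per_sec;           /* 32-bit product */
            uint64_t right = (uint64_t)tv_sec * nsec_per_sec; /* 64-bit product */

            /* On ILP32: wrong == 705032704, right == 5000000000 */
            printf("%llu vs %llu\n",
                   (unsigned long long)wrong, (unsigned long long)right);
            return 0;
    }
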
+diff --git a/include/linux/compiler.h b/include/linux/compiler.h
+index 2bd394ed35f6..1a6f01df1496 100644
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -138,7 +138,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+ */
+ #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
+ #define __trace_if(cond) \
+- if (__builtin_constant_p((cond)) ? !!(cond) : \
++ if (__builtin_constant_p(!!(cond)) ? !!(cond) : \
+ ({ \
+ int ______r; \
+ static struct ftrace_branch_data \
+diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h
+index 251a2090a554..e0ee0b3000b2 100644
+--- a/include/linux/devpts_fs.h
++++ b/include/linux/devpts_fs.h
+@@ -19,6 +19,8 @@
+
+ int devpts_new_index(struct inode *ptmx_inode);
+ void devpts_kill_index(struct inode *ptmx_inode, int idx);
++void devpts_add_ref(struct inode *ptmx_inode);
++void devpts_del_ref(struct inode *ptmx_inode);
+ /* mknod in devpts */
+ struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
+ void *priv);
+@@ -32,6 +34,8 @@ void devpts_pty_kill(struct inode *inode);
+ /* Dummy stubs in the no-pty case */
+ static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; }
+ static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { }
++static inline void devpts_add_ref(struct inode *ptmx_inode) { }
++static inline void devpts_del_ref(struct inode *ptmx_inode) { }
+ static inline struct inode *devpts_pty_new(struct inode *ptmx_inode,
+ dev_t device, int index, void *priv)
+ {
+diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
+index c72851328ca9..72486551c4ca 100644
+--- a/include/linux/tracepoint.h
++++ b/include/linux/tracepoint.h
+@@ -14,8 +14,10 @@
+ * See the file COPYING for more details.
+ */
+
++#include <linux/smp.h>
+ #include <linux/errno.h>
+ #include <linux/types.h>
++#include <linux/cpumask.h>
+ #include <linux/rcupdate.h>
+ #include <linux/static_key.h>
+
+@@ -121,6 +123,9 @@ extern void syscall_unregfunc(void);
+ void *it_func; \
+ void *__data; \
+ \
++ if (!cpu_online(raw_smp_processor_id())) \
++ return; \
++ \
+ if (!(cond)) \
+ return; \
+ prercu; \
+diff --git a/include/sound/pcm.h b/include/sound/pcm.h
+index 8bb00a27e219..e02b1b8d8ee4 100644
+--- a/include/sound/pcm.h
++++ b/include/sound/pcm.h
+@@ -535,6 +535,12 @@ snd_pcm_debug_name(struct snd_pcm_substream *substream, char *buf, size_t size)
+ * PCM library
+ */
+
++/**
++ * snd_pcm_stream_linked - Check whether the substream is linked with others
++ * @substream: substream to check
++ *
++ * Returns true if the given substream is linked with others.
++ */
+ static inline int snd_pcm_stream_linked(struct snd_pcm_substream *substream)
+ {
+ return substream->group != &substream->self_group;
+@@ -545,6 +551,16 @@ void snd_pcm_stream_unlock(struct snd_pcm_substream *substream);
+ void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream);
+ void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream);
+ unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream);
++
++/**
++ * snd_pcm_stream_lock_irqsave - Lock the PCM stream
++ * @substream: PCM substream
++ * @flags: irq flags
++ *
++ * This locks the PCM stream like snd_pcm_stream_lock() but with the local
++ * IRQ disabled (only when nonatomic is false). In the nonatomic case, this
++ * is identical to snd_pcm_stream_lock().
++ */
+ #define snd_pcm_stream_lock_irqsave(substream, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+@@ -553,9 +569,25 @@ unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream);
+ void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
+ unsigned long flags);
+
++/**
++ * snd_pcm_group_for_each_entry - iterate over the linked substreams
++ * @s: the iterator
++ * @substream: the substream
++ *
++ * Iterate over all substreams linked to the given @substream.
++ * When @substream isn't linked with any others, this yields @substream
++ * itself once.
++ */
+ #define snd_pcm_group_for_each_entry(s, substream) \
+ list_for_each_entry(s, &substream->group->substreams, link_list)
+
++/**
++ * snd_pcm_running - Check whether the substream is in a running state
++ * @substream: substream to check
++ *
++ * Returns true if the given substream is in the state RUNNING, or in the
++ * state DRAINING for playback.
++ */
+ static inline int snd_pcm_running(struct snd_pcm_substream *substream)
+ {
+ return (substream->runtime->status->state == SNDRV_PCM_STATE_RUNNING ||
+@@ -563,45 +595,81 @@ static inline int snd_pcm_running(struct snd_pcm_substream *substream)
+ substream->stream == SNDRV_PCM_STREAM_PLAYBACK));
+ }
+
++/**
++ * bytes_to_samples - Unit conversion of the size from bytes to samples
++ * @runtime: PCM runtime instance
++ * @size: size in bytes
++ */
+ static inline ssize_t bytes_to_samples(struct snd_pcm_runtime *runtime, ssize_t size)
+ {
+ return size * 8 / runtime->sample_bits;
+ }
+
++/**
++ * bytes_to_frames - Unit conversion of the size from bytes to frames
++ * @runtime: PCM runtime instance
++ * @size: size in bytes
++ */
+ static inline snd_pcm_sframes_t bytes_to_frames(struct snd_pcm_runtime *runtime, ssize_t size)
+ {
+ return size * 8 / runtime->frame_bits;
+ }
+
++/**
++ * samples_to_bytes - Unit conversion of the size from samples to bytes
++ * @runtime: PCM runtime instance
++ * @size: size in samples
++ */
+ static inline ssize_t samples_to_bytes(struct snd_pcm_runtime *runtime, ssize_t size)
+ {
+ return size * runtime->sample_bits / 8;
+ }
+
++/**
++ * frames_to_bytes - Unit conversion of the size from frames to bytes
++ * @runtime: PCM runtime instance
++ * @size: size in frames
++ */
+ static inline ssize_t frames_to_bytes(struct snd_pcm_runtime *runtime, snd_pcm_sframes_t size)
+ {
+ return size * runtime->frame_bits / 8;
+ }
+
++/**
++ * frame_aligned - Check whether the byte size is aligned to frames
++ * @runtime: PCM runtime instance
++ * @bytes: size in bytes
++ */
+ static inline int frame_aligned(struct snd_pcm_runtime *runtime, ssize_t bytes)
+ {
+ return bytes % runtime->byte_align == 0;
+ }
+
++/**
++ * snd_pcm_lib_buffer_bytes - Get the buffer size of the current PCM in bytes
++ * @substream: PCM substream
++ */
+ static inline size_t snd_pcm_lib_buffer_bytes(struct snd_pcm_substream *substream)
+ {
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ return frames_to_bytes(runtime, runtime->buffer_size);
+ }
+
++/**
++ * snd_pcm_lib_period_bytes - Get the period size of the current PCM in bytes
++ * @substream: PCM substream
++ */
+ static inline size_t snd_pcm_lib_period_bytes(struct snd_pcm_substream *substream)
+ {
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ return frames_to_bytes(runtime, runtime->period_size);
+ }
+
+-/*
+- * result is: 0 ... (boundary - 1)
++/**
++ * snd_pcm_playback_avail - Get the available (writable) space for playback
++ * @runtime: PCM runtime instance
++ *
++ * Result is between 0 ... (boundary - 1)
+ */
+ static inline snd_pcm_uframes_t snd_pcm_playback_avail(struct snd_pcm_runtime *runtime)
+ {
+@@ -613,8 +681,11 @@ static inline snd_pcm_uframes_t snd_pcm_playback_avail(struct snd_pcm_runtime *r
+ return avail;
+ }
+
+-/*
+- * result is: 0 ... (boundary - 1)
++/**
++ * snd_pcm_capture_avail - Get the available (readable) space for capture
++ * @runtime: PCM runtime instance
++ *
++ * Result is between 0 ... (boundary - 1)
+ */
+ static inline snd_pcm_uframes_t snd_pcm_capture_avail(struct snd_pcm_runtime *runtime)
+ {
+@@ -624,11 +695,19 @@ static inline snd_pcm_uframes_t snd_pcm_capture_avail(struct snd_pcm_runtime *ru
+ return avail;
+ }
+
++/**
++ * snd_pcm_playback_hw_avail - Get the queued space for playback
++ * @runtime: PCM runtime instance
++ */
+ static inline snd_pcm_sframes_t snd_pcm_playback_hw_avail(struct snd_pcm_runtime *runtime)
+ {
+ return runtime->buffer_size - snd_pcm_playback_avail(runtime);
+ }
+
++/**
++ * snd_pcm_capture_hw_avail - Get the free space for capture
++ * @runtime: PCM runtime instance
++ */
+ static inline snd_pcm_sframes_t snd_pcm_capture_hw_avail(struct snd_pcm_runtime *runtime)
+ {
+ return runtime->buffer_size - snd_pcm_capture_avail(runtime);
+@@ -708,6 +787,20 @@ static inline int snd_pcm_capture_empty(struct snd_pcm_substream *substream)
+ return snd_pcm_capture_avail(runtime) == 0;
+ }
+
++/**
++ * snd_pcm_trigger_done - Mark the master substream
++ * @substream: the pcm substream instance
++ * @master: the linked master substream
++ *
++ * When multiple substreams of the same card are linked and the hardware
++ * supports the single-shot operation, the driver calls this in the loop
++ * in snd_pcm_group_for_each_entry() for marking the substream as "done".
++ * Then most of the trigger operations are performed only on the given
++ * master substream.
++ *
++ * The trigger_master mark is cleared at timestamp updates at the end
++ * of trigger operations.
++ */
+ static inline void snd_pcm_trigger_done(struct snd_pcm_substream *substream,
+ struct snd_pcm_substream *master)
+ {
+@@ -883,6 +976,14 @@ unsigned int snd_pcm_rate_bit_to_rate(unsigned int rate_bit);
+ unsigned int snd_pcm_rate_mask_intersect(unsigned int rates_a,
+ unsigned int rates_b);
+
++/**
++ * snd_pcm_set_runtime_buffer - Set the PCM runtime buffer
++ * @substream: PCM substream to set
++ * @bufp: the buffer information, NULL to clear
++ *
++ * Copies the buffer information to runtime->dma_buffer when @bufp is
++ * non-NULL. Otherwise it clears the current buffer information.
++ */
+ static inline void snd_pcm_set_runtime_buffer(struct snd_pcm_substream *substream,
+ struct snd_dma_buffer *bufp)
+ {
+@@ -908,6 +1009,11 @@ void snd_pcm_timer_resolution_change(struct snd_pcm_substream *substream);
+ void snd_pcm_timer_init(struct snd_pcm_substream *substream);
+ void snd_pcm_timer_done(struct snd_pcm_substream *substream);
+
++/**
++ * snd_pcm_gettime - Fill the timespec depending on the timestamp mode
++ * @runtime: PCM runtime instance
++ * @tv: timespec to fill
++ */
+ static inline void snd_pcm_gettime(struct snd_pcm_runtime *runtime,
+ struct timespec *tv)
+ {
+@@ -998,18 +1104,35 @@ struct page *snd_pcm_sgbuf_ops_page(struct snd_pcm_substream *substream,
+ #define snd_pcm_sgbuf_ops_page NULL
+ #endif /* SND_DMA_SGBUF */
+
++/**
++ * snd_pcm_sgbuf_get_addr - Get the DMA address at the corresponding offset
++ * @substream: PCM substream
++ * @ofs: byte offset
++ */
+ static inline dma_addr_t
+ snd_pcm_sgbuf_get_addr(struct snd_pcm_substream *substream, unsigned int ofs)
+ {
+ return snd_sgbuf_get_addr(snd_pcm_get_dma_buf(substream), ofs);
+ }
+
++/**
++ * snd_pcm_sgbuf_get_ptr - Get the virtual address at the corresponding offset
++ * @substream: PCM substream
++ * @ofs: byte offset
++ */
+ static inline void *
+ snd_pcm_sgbuf_get_ptr(struct snd_pcm_substream *substream, unsigned int ofs)
+ {
+ return snd_sgbuf_get_ptr(snd_pcm_get_dma_buf(substream), ofs);
+ }
+
++/**
++ * snd_pcm_sgbuf_get_chunk_size - Compute the max size that fits within the
++ * contiguous page from the given size
++ * @substream: PCM substream
++ * @ofs: byte offset
++ * @size: byte size to examine
++ */
+ static inline unsigned int
+ snd_pcm_sgbuf_get_chunk_size(struct snd_pcm_substream *substream,
+ unsigned int ofs, unsigned int size)
+@@ -1017,13 +1140,24 @@ snd_pcm_sgbuf_get_chunk_size(struct snd_pcm_substream *substream,
+ return snd_sgbuf_get_chunk_size(snd_pcm_get_dma_buf(substream), ofs, size);
+ }
+
+-/* handle mmap counter - PCM mmap callback should handle this counter properly */
++/**
++ * snd_pcm_mmap_data_open - increase the mmap counter
++ * @area: VMA
++ *
++ * PCM mmap callback should handle this counter properly
++ */
+ static inline void snd_pcm_mmap_data_open(struct vm_area_struct *area)
+ {
+ struct snd_pcm_substream *substream = (struct snd_pcm_substream *)area->vm_private_data;
+ atomic_inc(&substream->mmap_count);
+ }
+
++/**
++ * snd_pcm_mmap_data_close - decrease the mmap counter
++ * @area: VMA
++ *
++ * PCM mmap callback should handle this counter properly
++ */
+ static inline void snd_pcm_mmap_data_close(struct vm_area_struct *area)
+ {
+ struct snd_pcm_substream *substream = (struct snd_pcm_substream *)area->vm_private_data;
+@@ -1043,6 +1177,11 @@ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_s
+
+ #define snd_pcm_lib_mmap_vmalloc NULL
+
++/**
++ * snd_pcm_limit_isa_dma_size - Get the max size that fits an ISA DMA transfer
++ * @dma: DMA number
++ * @max: pointer to store the max size
++ */
+ static inline void snd_pcm_limit_isa_dma_size(int dma, size_t *max)
+ {
+ *max = dma < 4 ? 64 * 1024 : 128 * 1024;
+@@ -1095,7 +1234,11 @@ struct snd_pcm_chmap {
+ void *private_data; /* optional: private data pointer */
+ };
+
+-/* get the PCM substream assigned to the given chmap info */
++/**
++ * snd_pcm_chmap_substream - get the PCM substream assigned to the given chmap info
++ * @info: chmap information
++ * @idx: the substream number index
++ */
+ static inline struct snd_pcm_substream *
+ snd_pcm_chmap_substream(struct snd_pcm_chmap *info, unsigned int idx)
+ {
+@@ -1122,7 +1265,10 @@ int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream,
+ unsigned long private_value,
+ struct snd_pcm_chmap **info_ret);
+
+-/* Strong-typed conversion of pcm_format to bitwise */
++/**
++ * pcm_format_to_bits - Strong-typed conversion of pcm_format to bitwise
++ * @pcm_format: PCM format
++ */
+ static inline u64 pcm_format_to_bits(snd_pcm_format_t pcm_format)
+ {
+ return 1ULL << (__force int) pcm_format;
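
The conversion helpers documented above are plain arithmetic over runtime->sample_bits and runtime->frame_bits. A worked example outside the kernel, assuming S16_LE stereo so that sample_bits = 16 and frame_bits = 2 * 16 = 32:

    #include <stdio.h>

    int main(void)
    {
            unsigned int sample_bits = 16;  /* S16_LE */
            unsigned int frame_bits  = 32;  /* 2 channels * 16 bits */
            unsigned int bytes       = 4096;

            printf("samples: %u\n", bytes * 8 / sample_bits); /* 2048 */
            printf("frames:  %u\n", bytes * 8 / frame_bits);  /* 1024 */
            printf("bytes:   %u\n", 1024 * frame_bits / 8);   /* 4096 */
            return 0;
    }
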
+diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
+index 22a4e98eec80..e21dbdb0fe3d 100644
+--- a/include/target/target_core_fabric.h
++++ b/include/target/target_core_fabric.h
+@@ -126,8 +126,8 @@ bool transport_wait_for_tasks(struct se_cmd *);
+ int transport_check_aborted_status(struct se_cmd *, int);
+ int transport_send_check_condition_and_sense(struct se_cmd *,
+ sense_reason_t, int);
+-int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool);
+-int target_put_sess_cmd(struct se_session *, struct se_cmd *);
++int target_get_sess_cmd(struct se_cmd *, bool);
++int target_put_sess_cmd(struct se_cmd *);
+ void target_sess_cmd_list_set_waiting(struct se_session *);
+ void target_wait_for_sess_cmds(struct se_session *);
+
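
The prototype change drops the explicit se_session argument; both helpers now read the session from se_cmd->se_sess internally. Fabric callers convert mechanically, as the vhost-scsi hunk earlier in this patch shows; in sketch form:

    /* Before this patch: */
    target_get_sess_cmd(se_cmd->se_sess, se_cmd, ack_kref);
    target_put_sess_cmd(se_cmd->se_sess, se_cmd);

    /* After: the session argument is gone. */
    target_get_sess_cmd(se_cmd, ack_kref);
    target_put_sess_cmd(se_cmd);
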
+diff --git a/lib/klist.c b/lib/klist.c
+index 89b485a2a58d..2a072bfaeace 100644
+--- a/lib/klist.c
++++ b/lib/klist.c
+@@ -282,9 +282,9 @@ void klist_iter_init_node(struct klist *k, struct klist_iter *i,
+ struct klist_node *n)
+ {
+ i->i_klist = k;
+- i->i_cur = n;
+- if (n)
+- kref_get(&n->n_ref);
++ i->i_cur = NULL;
++ if (n && kref_get_unless_zero(&n->n_ref))
++ i->i_cur = n;
+ }
+ EXPORT_SYMBOL_GPL(klist_iter_init_node);
+
+diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
+index 4ada1a97a60b..e1998df4c160 100644
+--- a/security/integrity/evm/evm_main.c
++++ b/security/integrity/evm/evm_main.c
+@@ -24,6 +24,7 @@
+ #include <linux/evm.h>
+ #include <linux/magic.h>
+ #include <crypto/hash.h>
++#include <crypto/algapi.h>
+ #include "evm.h"
+
+ int evm_initialized;
+@@ -149,7 +150,7 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
+ xattr_value_len, calc.digest);
+ if (rc)
+ break;
+- rc = memcmp(xattr_data->digest, calc.digest,
++ rc = crypto_memneq(xattr_data->digest, calc.digest,
+ sizeof(calc.digest));
+ if (rc)
+ rc = -EINVAL;
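
memcmp() returns as soon as a byte differs, so timing the HMAC verification can reveal how many leading digest bytes matched. crypto_memneq() (the kernel's implementation is in crypto/memneq.c) touches every byte unconditionally. The idea in miniature, as a freestanding sketch rather than the kernel's code:

    #include <stddef.h>

    /* Constant-time inequality test: the loop's work does not depend
     * on where (or whether) the buffers differ. */
    static int ct_memneq(const void *a, const void *b, size_t n)
    {
            const unsigned char *p = a, *q = b;
            unsigned char diff = 0;
            size_t i;

            for (i = 0; i < n; i++)
                    diff |= p[i] ^ q[i];
            return diff != 0;       /* nonzero iff the buffers differ */
    }
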
+diff --git a/sound/core/pcm.c b/sound/core/pcm.c
+index c6ff94ab1ad6..b917a47a7bb6 100644
+--- a/sound/core/pcm.c
++++ b/sound/core/pcm.c
+@@ -220,6 +220,10 @@ static char *snd_pcm_format_names[] = {
+ FORMAT(DSD_U32_BE),
+ };
+
++/**
++ * snd_pcm_format_name - Return a name string for the given PCM format
++ * @format: PCM format
++ */
+ const char *snd_pcm_format_name(snd_pcm_format_t format)
+ {
+ if ((__force unsigned int)format >= ARRAY_SIZE(snd_pcm_format_names))
+@@ -709,7 +713,6 @@ int snd_pcm_new_stream(struct snd_pcm *pcm, int stream, int substream_count)
+ }
+ return 0;
+ }
+-
+ EXPORT_SYMBOL(snd_pcm_new_stream);
+
+ static int _snd_pcm_new(struct snd_card *card, const char *id, int device,
+@@ -1157,6 +1160,15 @@ static int snd_pcm_dev_disconnect(struct snd_device *device)
+ return 0;
+ }
+
++/**
++ * snd_pcm_notify - Add/remove the notify list
++ * @notify: PCM notify list
++ * @nfree: 0 = register, 1 = unregister
++ *
++ * This adds the given notifier to the global list so that the callback is
++ * called for each registered PCM device. This exists only for PCM OSS
++ * emulation, so far.
++ */
+ int snd_pcm_notify(struct snd_pcm_notify *notify, int nfree)
+ {
+ struct snd_pcm *pcm;
+@@ -1179,7 +1191,6 @@ int snd_pcm_notify(struct snd_pcm_notify *notify, int nfree)
+ mutex_unlock(&register_mutex);
+ return 0;
+ }
+-
+ EXPORT_SYMBOL(snd_pcm_notify);
+
+ #ifdef CONFIG_PROC_FS
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index 9c823cfdfff0..b04802c6ffb9 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -77,6 +77,26 @@ static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream);
+ static DEFINE_RWLOCK(snd_pcm_link_rwlock);
+ static DECLARE_RWSEM(snd_pcm_link_rwsem);
+
++/* A writer in an rwsem may block readers even while it is waiting in the
++ * queue, and this may lead to a deadlock when a code path takes the read
++ * sem twice (e.g. once in snd_pcm_action_nonatomic() and again in
++ * snd_pcm_stream_lock()). As a (suboptimal) workaround, let the writer
++ * spin until it gets the lock.
++ */
++static inline void down_write_nonblock(struct rw_semaphore *lock)
++{
++ while (!down_write_trylock(lock))
++ cond_resched();
++}
++
++/**
++ * snd_pcm_stream_lock - Lock the PCM stream
++ * @substream: PCM substream
++ *
++ * This locks the PCM stream's spinlock or mutex depending on the nonatomic
++ * flag of the given substream. It also takes the global link rw lock
++ * (or rw sem) to avoid races with linked streams.
++ */
+ void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
+ {
+ if (substream->pcm->nonatomic) {
+@@ -89,6 +109,12 @@ void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
+ }
+ EXPORT_SYMBOL_GPL(snd_pcm_stream_lock);
+
++/**
++ * snd_pcm_stream_unlock - Unlock the PCM stream
++ * @substream: PCM substream
++ *
++ * This unlocks the PCM stream that has been locked via snd_pcm_stream_lock().
++ */
+ void snd_pcm_stream_unlock(struct snd_pcm_substream *substream)
+ {
+ if (substream->pcm->nonatomic) {
+@@ -101,6 +127,14 @@ void snd_pcm_stream_unlock(struct snd_pcm_substream *substream)
+ }
+ EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock);
+
++/**
++ * snd_pcm_stream_lock_irq - Lock the PCM stream
++ * @substream: PCM substream
++ *
++ * This locks the PCM stream like snd_pcm_stream_lock() and disables the local
++ * IRQ (only when nonatomic is false). In the nonatomic case, this is
++ * identical to snd_pcm_stream_lock().
++ */
+ void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
+ {
+ if (!substream->pcm->nonatomic)
+@@ -109,6 +143,12 @@ void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
+ }
+ EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
+
++/**
++ * snd_pcm_stream_unlock_irq - Unlock the PCM stream
++ * @substream: PCM substream
++ *
++ * This is a counter-part of snd_pcm_stream_lock_irq().
++ */
+ void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
+ {
+ snd_pcm_stream_unlock(substream);
+@@ -127,6 +167,13 @@ unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream)
+ }
+ EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave);
+
++/**
++ * snd_pcm_stream_unlock_irqrestore - Unlock the PCM stream
++ * @substream: PCM substream
++ * @flags: irq flags
++ *
++ * This is a counter-part of snd_pcm_stream_lock_irqsave().
++ */
+ void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
+ unsigned long flags)
+ {
+@@ -1747,7 +1794,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
+ res = -ENOMEM;
+ goto _nolock;
+ }
+- down_write(&snd_pcm_link_rwsem);
++ down_write_nonblock(&snd_pcm_link_rwsem);
+ write_lock_irq(&snd_pcm_link_rwlock);
+ if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
+ substream->runtime->status->state != substream1->runtime->status->state ||
+@@ -1794,7 +1841,7 @@ static int snd_pcm_unlink(struct snd_pcm_substream *substream)
+ struct snd_pcm_substream *s;
+ int res = 0;
+
+- down_write(&snd_pcm_link_rwsem);
++ down_write_nonblock(&snd_pcm_link_rwsem);
+ write_lock_irq(&snd_pcm_link_rwlock);
+ if (!snd_pcm_stream_linked(substream)) {
+ res = -EALREADY;
+@@ -3319,6 +3366,15 @@ static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = {
+ /*
+ * mmap the DMA buffer on RAM
+ */
++
++/**
++ * snd_pcm_lib_default_mmap - Default PCM data mmap function
++ * @substream: PCM substream
++ * @area: VMA
++ *
++ * This is the default mmap handler for PCM data. When mmap pcm_ops is NULL,
++ * this function is invoked implicitly.
++ */
+ int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
+ struct vm_area_struct *area)
+ {
+@@ -3354,6 +3410,15 @@ EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap);
+ * mmap the DMA buffer on I/O memory area
+ */
+ #if SNDRV_PCM_INFO_MMAP_IOMEM
++/**
++ * snd_pcm_lib_mmap_iomem - Default PCM data mmap function for I/O mem
++ * @substream: PCM substream
++ * @area: VMA
++ *
++ * When your hardware uses the iomapped pages as the hardware buffer and
++ * wants to mmap it, pass this function as mmap pcm_ops. Note that this
++ * is supposed to work only on limited architectures.
++ */
+ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,
+ struct vm_area_struct *area)
+ {
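
The down_write_nonblock() workaround added near the top of this file's changes is easiest to see as an interleaving: a fair rwsem parks new readers behind a queued writer, so any path that takes the read side twice can deadlock.

    /*
     *   task A                            task B
     *   down_read(&snd_pcm_link_rwsem)
     *                                     down_write(&snd_pcm_link_rwsem)
     *                                       -> queued, waits for A
     *   down_read(&snd_pcm_link_rwsem)    (nested, e.g. via
     *     -> parked behind queued B        snd_pcm_stream_lock())
     *
     * Neither task can make progress. down_write_trylock() in a
     * cond_resched() loop never queues the writer, so A's nested
     * down_read() still succeeds.
     */
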
+diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
+index 53a403e17c5b..1d5acbe0c08b 100644
+--- a/sound/core/seq/seq_fifo.c
++++ b/sound/core/seq/seq_fifo.c
+@@ -33,10 +33,8 @@ struct snd_seq_fifo *snd_seq_fifo_new(int poolsize)
+ struct snd_seq_fifo *f;
+
+ f = kzalloc(sizeof(*f), GFP_KERNEL);
+- if (f == NULL) {
+- pr_debug("ALSA: seq: malloc failed for snd_seq_fifo_new() \n");
++ if (!f)
+ return NULL;
+- }
+
+ f->pool = snd_seq_pool_new(poolsize);
+ if (f->pool == NULL) {
+diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
+index ba8e4a64e13e..c850345c43b5 100644
+--- a/sound/core/seq/seq_memory.c
++++ b/sound/core/seq/seq_memory.c
+@@ -383,17 +383,20 @@ int snd_seq_pool_init(struct snd_seq_pool *pool)
+
+ if (snd_BUG_ON(!pool))
+ return -EINVAL;
+- if (pool->ptr) /* should be atomic? */
+- return 0;
+
+- pool->ptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
+- if (pool->ptr == NULL) {
+- pr_debug("ALSA: seq: malloc for sequencer events failed\n");
++ cellptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
++ if (!cellptr)
+ return -ENOMEM;
+- }
+
+ /* add new cells to the free cell list */
+ spin_lock_irqsave(&pool->lock, flags);
++ if (pool->ptr) {
++ spin_unlock_irqrestore(&pool->lock, flags);
++ vfree(cellptr);
++ return 0;
++ }
++
++ pool->ptr = cellptr;
+ pool->free = NULL;
+
+ for (cell = 0; cell < pool->size; cell++) {
+@@ -463,10 +466,8 @@ struct snd_seq_pool *snd_seq_pool_new(int poolsize)
+
+ /* create pool block */
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+- if (pool == NULL) {
+- pr_debug("ALSA: seq: malloc failed for pool\n");
++ if (!pool)
+ return NULL;
+- }
+ spin_lock_init(&pool->lock);
+ pool->ptr = NULL;
+ pool->free = NULL;
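
The snd_seq_pool_init() rework follows a standard shape for one-shot initialization that can race: allocate outside the lock (vmalloc() may sleep), then re-check under the lock and discard the allocation if another caller got there first. A generic sketch with hypothetical names:

    #include <linux/errno.h>
    #include <linux/spinlock.h>
    #include <linux/vmalloc.h>

    static int init_once(void **slot, size_t size, spinlock_t *lock)
    {
            void *mem = vmalloc(size);      /* may sleep: do it unlocked */
            unsigned long flags;

            if (!mem)
                    return -ENOMEM;

            spin_lock_irqsave(lock, flags);
            if (*slot) {                    /* somebody beat us to it */
                    spin_unlock_irqrestore(lock, flags);
                    vfree(mem);
                    return 0;
            }
            *slot = mem;
            spin_unlock_irqrestore(lock, flags);
            return 0;
    }
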
+diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
+index 2dcdf81e0abb..9c1c8d50f593 100644
+--- a/sound/core/seq/seq_ports.c
++++ b/sound/core/seq/seq_ports.c
+@@ -141,10 +141,8 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
+
+ /* create a new port */
+ new_port = kzalloc(sizeof(*new_port), GFP_KERNEL);
+- if (! new_port) {
+- pr_debug("ALSA: seq: malloc failed for registering client port\n");
++ if (!new_port)
+ return NULL; /* failure, out of memory */
+- }
+ /* init port data */
+ new_port->addr.client = client->number;
+ new_port->addr.port = -1;
+@@ -540,19 +538,22 @@ static void delete_and_unsubscribe_port(struct snd_seq_client *client,
+ bool is_src, bool ack)
+ {
+ struct snd_seq_port_subs_info *grp;
++ struct list_head *list;
++ bool empty;
+
+ grp = is_src ? &port->c_src : &port->c_dest;
++ list = is_src ? &subs->src_list : &subs->dest_list;
+ down_write(&grp->list_mutex);
+ write_lock_irq(&grp->list_lock);
+- if (is_src)
+- list_del(&subs->src_list);
+- else
+- list_del(&subs->dest_list);
++ empty = list_empty(list);
++ if (!empty)
++ list_del_init(list);
+ grp->exclusive = 0;
+ write_unlock_irq(&grp->list_lock);
+ up_write(&grp->list_mutex);
+
+- unsubscribe_port(client, port, grp, &subs->info, ack);
++ if (!empty)
++ unsubscribe_port(client, port, grp, &subs->info, ack);
+ }
+
+ /* connect two ports */
+diff --git a/sound/core/seq/seq_prioq.c b/sound/core/seq/seq_prioq.c
+index 021b02bc9330..bc1c8488fc2a 100644
+--- a/sound/core/seq/seq_prioq.c
++++ b/sound/core/seq/seq_prioq.c
+@@ -59,10 +59,8 @@ struct snd_seq_prioq *snd_seq_prioq_new(void)
+ struct snd_seq_prioq *f;
+
+ f = kzalloc(sizeof(*f), GFP_KERNEL);
+- if (f == NULL) {
+- pr_debug("ALSA: seq: malloc failed for snd_seq_prioq_new()\n");
++ if (!f)
+ return NULL;
+- }
+
+ spin_lock_init(&f->lock);
+ f->head = NULL;
+diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
+index aad4878cee55..a0cda38205b9 100644
+--- a/sound/core/seq/seq_queue.c
++++ b/sound/core/seq/seq_queue.c
+@@ -111,10 +111,8 @@ static struct snd_seq_queue *queue_new(int owner, int locked)
+ struct snd_seq_queue *q;
+
+ q = kzalloc(sizeof(*q), GFP_KERNEL);
+- if (q == NULL) {
+- pr_debug("ALSA: seq: malloc failed for snd_seq_queue_new()\n");
++ if (!q)
+ return NULL;
+- }
+
+ spin_lock_init(&q->owner_lock);
+ spin_lock_init(&q->check_lock);
+diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
+index c943dc41c6fe..a2468f1101d1 100644
+--- a/sound/core/seq/seq_timer.c
++++ b/sound/core/seq/seq_timer.c
+@@ -56,10 +56,8 @@ struct snd_seq_timer *snd_seq_timer_new(void)
+ struct snd_seq_timer *tmr;
+
+ tmr = kzalloc(sizeof(*tmr), GFP_KERNEL);
+- if (tmr == NULL) {
+- pr_debug("ALSA: seq: malloc failed for snd_seq_timer_new() \n");
++ if (!tmr)
+ return NULL;
+- }
+ spin_lock_init(&tmr->lock);
+
+ /* reset setup to defaults */
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index 942f36eb6946..4927a3c88340 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -65,6 +65,7 @@ struct snd_timer_user {
+ int qtail;
+ int qused;
+ int queue_size;
++ bool disconnected;
+ struct snd_timer_read *queue;
+ struct snd_timer_tread *tqueue;
+ spinlock_t qlock;
+@@ -290,6 +291,9 @@ int snd_timer_open(struct snd_timer_instance **ti,
+ mutex_unlock(&register_mutex);
+ return -ENOMEM;
+ }
++ /* take a card refcount for safe disconnection */
++ if (timer->card)
++ get_device(&timer->card->card_dev);
+ timeri->slave_class = tid->dev_sclass;
+ timeri->slave_id = slave_id;
+ if (list_empty(&timer->open_list_head) && timer->hw.open)
+@@ -359,6 +363,9 @@ int snd_timer_close(struct snd_timer_instance *timeri)
+ }
+ spin_unlock(&timer->lock);
+ spin_unlock_irq(&slave_active_lock);
++ /* release a card refcount for safe disconnection */
++ if (timer->card)
++ put_device(&timer->card->card_dev);
+ mutex_unlock(&register_mutex);
+ }
+ out:
+@@ -415,7 +422,7 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
+ spin_lock_irqsave(&timer->lock, flags);
+ list_for_each_entry(ts, &ti->slave_active_head, active_list)
+ if (ts->ccallback)
+- ts->ccallback(ti, event + 100, &tstamp, resolution);
++ ts->ccallback(ts, event + 100, &tstamp, resolution);
+ spin_unlock_irqrestore(&timer->lock, flags);
+ }
+
+@@ -479,6 +486,8 @@ int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
+ timer = timeri->timer;
+ if (timer == NULL)
+ return -EINVAL;
++ if (timer->card && timer->card->shutdown)
++ return -ENODEV;
+ spin_lock_irqsave(&timer->lock, flags);
+ if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
+ SNDRV_TIMER_IFLG_START)) {
+@@ -509,9 +518,13 @@ static int _snd_timer_stop(struct snd_timer_instance *timeri, int event)
+ spin_unlock_irqrestore(&slave_active_lock, flags);
+ return -EBUSY;
+ }
++ if (timeri->timer)
++ spin_lock(&timeri->timer->lock);
+ timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
+ list_del_init(&timeri->ack_list);
+ list_del_init(&timeri->active_list);
++ if (timeri->timer)
++ spin_unlock(&timeri->timer->lock);
+ spin_unlock_irqrestore(&slave_active_lock, flags);
+ goto __end;
+ }
+@@ -526,6 +539,10 @@ static int _snd_timer_stop(struct snd_timer_instance *timeri, int event)
+ }
+ list_del_init(&timeri->ack_list);
+ list_del_init(&timeri->active_list);
++ if (timer->card && timer->card->shutdown) {
++ spin_unlock_irqrestore(&timer->lock, flags);
++ return 0;
++ }
+ if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) &&
+ !(--timer->running)) {
+ timer->hw.stop(timer);
+@@ -586,6 +603,8 @@ int snd_timer_continue(struct snd_timer_instance *timeri)
+ timer = timeri->timer;
+ if (! timer)
+ return -EINVAL;
++ if (timer->card && timer->card->shutdown)
++ return -ENODEV;
+ spin_lock_irqsave(&timer->lock, flags);
+ if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
+ result = -EBUSY;
+@@ -654,6 +673,9 @@ static void snd_timer_tasklet(unsigned long arg)
+ unsigned long resolution, ticks;
+ unsigned long flags;
+
++ if (timer->card && timer->card->shutdown)
++ return;
++
+ spin_lock_irqsave(&timer->lock, flags);
+ /* now process all callbacks */
+ while (!list_empty(&timer->sack_list_head)) {
+@@ -694,6 +716,9 @@ void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
+ if (timer == NULL)
+ return;
+
++ if (timer->card && timer->card->shutdown)
++ return;
++
+ spin_lock_irqsave(&timer->lock, flags);
+
+ /* remember the current resolution */
+@@ -906,11 +931,28 @@ static int snd_timer_dev_register(struct snd_device *dev)
+ return 0;
+ }
+
++/* just for reference in snd_timer_dev_disconnect() below */
++static void snd_timer_user_ccallback(struct snd_timer_instance *timeri,
++ int event, struct timespec *tstamp,
++ unsigned long resolution);
++
+ static int snd_timer_dev_disconnect(struct snd_device *device)
+ {
+ struct snd_timer *timer = device->device_data;
++ struct snd_timer_instance *ti;
++
+ mutex_lock(&register_mutex);
+ list_del_init(&timer->device_list);
++ /* wake up pending sleepers */
++ list_for_each_entry(ti, &timer->open_list_head, open_list) {
++ /* FIXME: better to have a ti.disconnect() op */
++ if (ti->ccallback == snd_timer_user_ccallback) {
++ struct snd_timer_user *tu = ti->callback_data;
++
++ tu->disconnected = true;
++ wake_up(&tu->qchange_sleep);
++ }
++ }
+ mutex_unlock(&register_mutex);
+ return 0;
+ }
+@@ -921,6 +963,8 @@ void snd_timer_notify(struct snd_timer *timer, int event, struct timespec *tstam
+ unsigned long resolution = 0;
+ struct snd_timer_instance *ti, *ts;
+
++ if (timer->card && timer->card->shutdown)
++ return;
+ if (! (timer->hw.flags & SNDRV_TIMER_HW_SLAVE))
+ return;
+ if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_MSTART ||
+@@ -1081,6 +1125,8 @@ static void snd_timer_proc_read(struct snd_info_entry *entry,
+
+ mutex_lock(&register_mutex);
+ list_for_each_entry(timer, &snd_timer_list, device_list) {
++ if (timer->card && timer->card->shutdown)
++ continue;
+ switch (timer->tmr_class) {
+ case SNDRV_TIMER_CLASS_GLOBAL:
+ snd_iprintf(buffer, "G%i: ", timer->tmr_device);
+@@ -1876,6 +1922,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
+ {
+ struct snd_timer_user *tu;
+ long result = 0, unit;
++ int qhead;
+ int err = 0;
+
+ tu = file->private_data;
+@@ -1887,7 +1934,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
+
+ if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
+ err = -EAGAIN;
+- break;
++ goto _error;
+ }
+
+ set_current_state(TASK_INTERRUPTIBLE);
+@@ -1900,40 +1947,39 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
+
+ remove_wait_queue(&tu->qchange_sleep, &wait);
+
++ if (tu->disconnected) {
++ err = -ENODEV;
++ goto _error;
++ }
+ if (signal_pending(current)) {
+ err = -ERESTARTSYS;
+- break;
++ goto _error;
+ }
+ }
+
++ qhead = tu->qhead++;
++ tu->qhead %= tu->queue_size;
+ spin_unlock_irq(&tu->qlock);
+- if (err < 0)
+- goto _error;
+
+ if (tu->tread) {
+- if (copy_to_user(buffer, &tu->tqueue[tu->qhead++],
+- sizeof(struct snd_timer_tread))) {
++ if (copy_to_user(buffer, &tu->tqueue[qhead],
++ sizeof(struct snd_timer_tread)))
+ err = -EFAULT;
+- goto _error;
+- }
+ } else {
+- if (copy_to_user(buffer, &tu->queue[tu->qhead++],
+- sizeof(struct snd_timer_read))) {
++ if (copy_to_user(buffer, &tu->queue[qhead],
++ sizeof(struct snd_timer_read)))
+ err = -EFAULT;
+- goto _error;
+- }
+ }
+
+- tu->qhead %= tu->queue_size;
+-
+- result += unit;
+- buffer += unit;
+-
+ spin_lock_irq(&tu->qlock);
+ tu->qused--;
++ if (err < 0)
++ goto _error;
++ result += unit;
++ buffer += unit;
+ }
+- spin_unlock_irq(&tu->qlock);
+ _error:
++ spin_unlock_irq(&tu->qlock);
+ return result > 0 ? result : err;
+ }
+
+@@ -1949,6 +1995,8 @@ static unsigned int snd_timer_user_poll(struct file *file, poll_table * wait)
+ mask = 0;
+ if (tu->qused)
+ mask |= POLLIN | POLLRDNORM;
++ if (tu->disconnected)
++ mask |= POLLERR;
+
+ return mask;
+ }
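
Taken together, the timer changes make hot-unplug observable from userspace: sleeping readers are woken, read() fails with -ENODEV, and poll() reports POLLERR. A sketch of a client loop that honors this contract (illustrative, not part of the patch):

    #include <errno.h>
    #include <poll.h>
    #include <unistd.h>

    static ssize_t read_timer_events(int fd, void *buf, size_t len)
    {
            struct pollfd pfd = { .fd = fd, .events = POLLIN };

            for (;;) {
                    ssize_t n;

                    if (poll(&pfd, 1, -1) < 0)
                            return -1;
                    if (pfd.revents & POLLERR)      /* device unplugged */
                            return -1;
                    n = read(fd, buf, len);
                    if (n >= 0 || errno != EAGAIN)  /* ENODEV lands here */
                            return n;
            }
    }
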
+diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
+index 1e29a1983791..387bb8f603ac 100644
+--- a/sound/drivers/dummy.c
++++ b/sound/drivers/dummy.c
+@@ -87,7 +87,7 @@ MODULE_PARM_DESC(pcm_substreams, "PCM substreams # (1-128) for dummy driver.");
+ module_param(fake_buffer, bool, 0444);
+ MODULE_PARM_DESC(fake_buffer, "Fake buffer allocations.");
+ #ifdef CONFIG_HIGH_RES_TIMERS
+-module_param(hrtimer, bool, 0444);
++module_param(hrtimer, bool, 0644);
+ MODULE_PARM_DESC(hrtimer, "Use hrtimer as the timer source.");
+ #endif
+
+@@ -109,6 +109,9 @@ struct dummy_timer_ops {
+ snd_pcm_uframes_t (*pointer)(struct snd_pcm_substream *);
+ };
+
++#define get_dummy_ops(substream) \
++ (*(const struct dummy_timer_ops **)(substream)->runtime->private_data)
++
+ struct dummy_model {
+ const char *name;
+ int (*playback_constraints)(struct snd_pcm_runtime *runtime);
+@@ -137,7 +140,6 @@ struct snd_dummy {
+ int iobox;
+ struct snd_kcontrol *cd_volume_ctl;
+ struct snd_kcontrol *cd_switch_ctl;
+- const struct dummy_timer_ops *timer_ops;
+ };
+
+ /*
+@@ -231,6 +233,8 @@ struct dummy_model *dummy_models[] = {
+ */
+
+ struct dummy_systimer_pcm {
++ /* ops must be the first item */
++ const struct dummy_timer_ops *timer_ops;
+ spinlock_t lock;
+ struct timer_list timer;
+ unsigned long base_time;
+@@ -368,6 +372,8 @@ static struct dummy_timer_ops dummy_systimer_ops = {
+ */
+
+ struct dummy_hrtimer_pcm {
++ /* ops must be the first item */
++ const struct dummy_timer_ops *timer_ops;
+ ktime_t base_time;
+ ktime_t period_time;
+ atomic_t running;
+@@ -494,31 +500,25 @@ static struct dummy_timer_ops dummy_hrtimer_ops = {
+
+ static int dummy_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+ {
+- struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
+-
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+- return dummy->timer_ops->start(substream);
++ return get_dummy_ops(substream)->start(substream);
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+- return dummy->timer_ops->stop(substream);
++ return get_dummy_ops(substream)->stop(substream);
+ }
+ return -EINVAL;
+ }
+
+ static int dummy_pcm_prepare(struct snd_pcm_substream *substream)
+ {
+- struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
+-
+- return dummy->timer_ops->prepare(substream);
++ return get_dummy_ops(substream)->prepare(substream);
+ }
+
+ static snd_pcm_uframes_t dummy_pcm_pointer(struct snd_pcm_substream *substream)
+ {
+- struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
+-
+- return dummy->timer_ops->pointer(substream);
++ return get_dummy_ops(substream)->pointer(substream);
+ }
+
+ static struct snd_pcm_hardware dummy_pcm_hardware = {
+@@ -564,17 +564,19 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream)
+ struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
+ struct dummy_model *model = dummy->model;
+ struct snd_pcm_runtime *runtime = substream->runtime;
++ const struct dummy_timer_ops *ops;
+ int err;
+
+- dummy->timer_ops = &dummy_systimer_ops;
++ ops = &dummy_systimer_ops;
+ #ifdef CONFIG_HIGH_RES_TIMERS
+ if (hrtimer)
+- dummy->timer_ops = &dummy_hrtimer_ops;
++ ops = &dummy_hrtimer_ops;
+ #endif
+
+- err = dummy->timer_ops->create(substream);
++ err = ops->create(substream);
+ if (err < 0)
+ return err;
++ get_dummy_ops(substream) = ops;
+
+ runtime->hw = dummy->pcm_hw;
+ if (substream->pcm->device & 1) {
+@@ -596,7 +598,7 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream)
+ err = model->capture_constraints(substream->runtime);
+ }
+ if (err < 0) {
+- dummy->timer_ops->free(substream);
++ get_dummy_ops(substream)->free(substream);
+ return err;
+ }
+ return 0;
+@@ -604,8 +606,7 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream)
+
+ static int dummy_pcm_close(struct snd_pcm_substream *substream)
+ {
+- struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
+- dummy->timer_ops->free(substream);
++ get_dummy_ops(substream)->free(substream);
+ return 0;
+ }
+
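
The dummy-driver rework leans on the C guarantee that a pointer to a structure, suitably converted, points to its initial member. Both per-substream private structs now start with a const struct dummy_timer_ops *, so get_dummy_ops() can recover the ops without knowing which struct runtime->private_data actually holds. The idiom in isolation, with hypothetical types:

    #include <stdio.h>

    struct ops { const char *name; };

    struct priv_a { const struct ops *ops; int a_state; };
    struct priv_b { const struct ops *ops; long b_state; };

    /* Works for both structs because 'ops' is the first member. */
    static const struct ops *get_ops(void *private_data)
    {
            return *(const struct ops **)private_data;
    }

    int main(void)
    {
            static const struct ops hr = { "hrtimer" };
            struct priv_b p = { .ops = &hr };

            printf("%s\n", get_ops(&p)->name);      /* prints "hrtimer" */
            return 0;
    }
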
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 99e952293498..8e8ccde973df 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -435,7 +435,8 @@ static int hdmi_eld_ctl_get(struct snd_kcontrol *kcontrol,
+ eld = &per_pin->sink_eld;
+
+ mutex_lock(&per_pin->lock);
+- if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data)) {
++ if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data) ||
++ eld->eld_size > ELD_MAX_SIZE) {
+ mutex_unlock(&per_pin->lock);
+ snd_BUG();
+ return -EINVAL;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index d36cdb27a02c..f9f929d5130a 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2198,6 +2198,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
+ SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
+ SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
++ SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
+
+ /* All Apple entries are in codec SSIDs */
+ SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
+diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
+index d16331e0b64d..35a27281ed86 100644
+--- a/sound/soc/codecs/rt5645.c
++++ b/sound/soc/codecs/rt5645.c
+@@ -474,7 +474,7 @@ static const struct snd_kcontrol_new rt5645_snd_controls[] = {
+
+ /* IN1/IN2 Control */
+ SOC_SINGLE_TLV("IN1 Boost", RT5645_IN1_CTRL1,
+- RT5645_BST_SFT1, 8, 0, bst_tlv),
++ RT5645_BST_SFT1, 12, 0, bst_tlv),
+ SOC_SINGLE_TLV("IN2 Boost", RT5645_IN2_CTRL,
+ RT5645_BST_SFT2, 8, 0, bst_tlv),
+
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index 57277dd79e11..f8893f51d47b 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -1668,7 +1668,8 @@ int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream)
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) &&
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) &&
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED) &&
+- (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP))
++ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
++ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND))
+ continue;
+
+ dev_dbg(be->dev, "ASoC: hw_free BE %s\n",
+diff --git a/sound/usb/midi.c b/sound/usb/midi.c
+index 5bfb695547f8..92ae88bdff7e 100644
+--- a/sound/usb/midi.c
++++ b/sound/usb/midi.c
+@@ -2406,7 +2406,6 @@ int snd_usbmidi_create(struct snd_card *card,
+ else
+ err = snd_usbmidi_create_endpoints(umidi, endpoints);
+ if (err < 0) {
+- snd_usbmidi_free(umidi);
+ return err;
+ }
+