author    Mike Pagano <mpagano@gentoo.org>  2017-03-26 15:33:18 -0400
committer Mike Pagano <mpagano@gentoo.org>  2017-03-26 15:33:18 -0400
commit    504b3f376381804b07a3110ce29a6bb6ac11060d
tree      136468fbf275a3cd7f1d0feedff667920b495946
parent    Upgrade gcc cpu optimization patch. See bug #613570
Linux patch 4.10.6
-rw-r--r--  0000_README             |   4
-rw-r--r--  1005_linux-4.10.6.patch | 943
2 files changed, 947 insertions, 0 deletions
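
For context, the numbered patches listed in 0000_README apply from the top of a
kernel source tree with -p1; a typical manual invocation (not part of this
commit) would be:

    cd linux-4.10 && patch -p1 < 1005_linux-4.10.6.patch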
diff --git a/0000_README b/0000_README
index 464eea37..014e9c49 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch: 1004_linux-4.10.5.patch
From: http://www.kernel.org
Desc: Linux 4.10.5
+Patch: 1005_linux-4.10.6.patch
+From: http://www.kernel.org
+Desc: Linux 4.10.6
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1005_linux-4.10.6.patch b/1005_linux-4.10.6.patch
new file mode 100644
index 00000000..3c1b6da9
--- /dev/null
+++ b/1005_linux-4.10.6.patch
@@ -0,0 +1,943 @@
+diff --git a/Makefile b/Makefile
+index 48e18096913f..23b6d29cb6da 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 10
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+
+diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
+index 7bd69bd43a01..1d8c24dc04d4 100644
+--- a/arch/parisc/include/asm/cacheflush.h
++++ b/arch/parisc/include/asm/cacheflush.h
+@@ -45,28 +45,9 @@ static inline void flush_kernel_dcache_page(struct page *page)
+
+ #define flush_kernel_dcache_range(start,size) \
+ flush_kernel_dcache_range_asm((start), (start)+(size));
+-/* vmap range flushes and invalidates. Architecturally, we don't need
+- * the invalidate, because the CPU should refuse to speculate once an
+- * area has been flushed, so invalidate is left empty */
+-static inline void flush_kernel_vmap_range(void *vaddr, int size)
+-{
+- unsigned long start = (unsigned long)vaddr;
+-
+- flush_kernel_dcache_range_asm(start, start + size);
+-}
+-static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
+-{
+- unsigned long start = (unsigned long)vaddr;
+- void *cursor = vaddr;
+
+- for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
+- struct page *page = vmalloc_to_page(cursor);
+-
+- if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+- flush_kernel_dcache_page(page);
+- }
+- flush_kernel_dcache_range_asm(start, start + size);
+-}
++void flush_kernel_vmap_range(void *vaddr, int size);
++void invalidate_kernel_vmap_range(void *vaddr, int size);
+
+ #define flush_cache_vmap(start, end) flush_cache_all()
+ #define flush_cache_vunmap(start, end) flush_cache_all()
+diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
+index 977f0a4f5ecf..53ec75f8e237 100644
+--- a/arch/parisc/kernel/cache.c
++++ b/arch/parisc/kernel/cache.c
+@@ -633,3 +633,25 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
+ __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+ }
+ }
++
++void flush_kernel_vmap_range(void *vaddr, int size)
++{
++ unsigned long start = (unsigned long)vaddr;
++
++ if ((unsigned long)size > parisc_cache_flush_threshold)
++ flush_data_cache();
++ else
++ flush_kernel_dcache_range_asm(start, start + size);
++}
++EXPORT_SYMBOL(flush_kernel_vmap_range);
++
++void invalidate_kernel_vmap_range(void *vaddr, int size)
++{
++ unsigned long start = (unsigned long)vaddr;
++
++ if ((unsigned long)size > parisc_cache_flush_threshold)
++ flush_data_cache();
++ else
++ flush_kernel_dcache_range_asm(start, start + size);
++}
++EXPORT_SYMBOL(invalidate_kernel_vmap_range);
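+
+A usage sketch of the two helpers this hunk moves out of line (hypothetical
+caller, not from this patch; do_device_io() is a stand-in for a real transfer):
+
+    /* Hypothetical usage: bracket device I/O on a vmap()ed buffer. */
+    void *buf = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
+
+    flush_kernel_vmap_range(buf, len);      /* write back CPU-dirty lines */
+    do_device_io(buf, len);                 /* assumed DMA to/from the buffer */
+    invalidate_kernel_vmap_range(buf, len); /* drop stale lines before the CPU reads */
+    vunmap(buf);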
+diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
+index a0ecdb4abcc8..c66c943d9322 100644
+--- a/arch/parisc/kernel/module.c
++++ b/arch/parisc/kernel/module.c
+@@ -620,6 +620,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
+ */
+ *loc = fsel(val, addend);
+ break;
++ case R_PARISC_SECREL32:
++ /* 32-bit section relative address. */
++ *loc = fsel(val, addend);
++ break;
+ case R_PARISC_DPREL21L:
+ /* left 21 bit of relative address */
+ val = lrsel(val - dp, addend);
+@@ -807,6 +811,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
+ */
+ *loc = fsel(val, addend);
+ break;
++ case R_PARISC_SECREL32:
++ /* 32-bit section relative address. */
++ *loc = fsel(val, addend);
++ break;
+ case R_PARISC_FPTR64:
+ /* 64-bit function address */
+ if(in_local(me, (void *)(val + addend))) {
+diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
+index ea6603ee8d24..9e2d98ee6f9c 100644
+--- a/arch/parisc/kernel/process.c
++++ b/arch/parisc/kernel/process.c
+@@ -139,6 +139,8 @@ void machine_power_off(void)
+
+ printk(KERN_EMERG "System shut down completed.\n"
+ "Please power this system off now.");
++
++ for (;;);
+ }
+
+ void (*pm_power_off)(void) = machine_power_off;
+diff --git a/arch/powerpc/boot/zImage.lds.S b/arch/powerpc/boot/zImage.lds.S
+index 861e72109df2..f080abfc2f83 100644
+--- a/arch/powerpc/boot/zImage.lds.S
++++ b/arch/powerpc/boot/zImage.lds.S
+@@ -68,6 +68,7 @@ SECTIONS
+ }
+
+ #ifdef CONFIG_PPC64_BOOT_WRAPPER
++ . = ALIGN(256);
+ .got :
+ {
+ __toc_start = .;
+diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
+index 3ad86fdf954e..b1ad12552b56 100644
+--- a/drivers/char/hw_random/omap-rng.c
++++ b/drivers/char/hw_random/omap-rng.c
+@@ -397,9 +397,8 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
+ irq, err);
+ return err;
+ }
+- omap_rng_write(priv, RNG_INTMASK_REG, RNG_SHUTDOWN_OFLO_MASK);
+
+- priv->clk = of_clk_get(pdev->dev.of_node, 0);
++ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (!IS_ERR(priv->clk)) {
+@@ -408,6 +407,19 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
+ dev_err(&pdev->dev, "unable to enable the clk, "
+ "err = %d\n", err);
+ }
++
++ /*
++ * On OMAP4, enabling the shutdown_oflo interrupt is
++ * done in the interrupt mask register. There is no
++ * such register on EIP76, and it's enabled by the
+ * same bit in the control register.
++ */
++ if (priv->pdata->regs[RNG_INTMASK_REG])
++ omap_rng_write(priv, RNG_INTMASK_REG,
++ RNG_SHUTDOWN_OFLO_MASK);
++ else
++ omap_rng_write(priv, RNG_CONTROL_REG,
++ RNG_SHUTDOWN_OFLO_MASK);
+ }
+ return 0;
+ }
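+
+The of_clk_get() -> devm_clk_get() switch above also moves the clock to a
+managed lifetime; a minimal sketch of that pattern (probe function invented
+for illustration):
+
+    /* Illustrative only: a devm-managed clk needs no clk_put() on any
+     * error or removal path; the core releases it when the device is
+     * unbound. */
+    static int example_probe(struct platform_device *pdev)
+    {
+            struct clk *clk = devm_clk_get(&pdev->dev, NULL);
+
+            if (IS_ERR(clk))
+                    return PTR_ERR(clk);
+            return clk_prepare_enable(clk);
+    }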
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index cc475eff90b3..061b165d632e 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -680,9 +680,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
+ char *buf)
+ {
+ unsigned int cur_freq = __cpufreq_get(policy);
+- if (!cur_freq)
+- return sprintf(buf, "<unknown>");
+- return sprintf(buf, "%u\n", cur_freq);
++
++ if (cur_freq)
++ return sprintf(buf, "%u\n", cur_freq);
++
++ return sprintf(buf, "<unknown>\n");
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+index 6e150db8f380..9a5ccae06b6c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+@@ -3497,6 +3497,12 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
+ (adev->pdev->device == 0x6667)) {
+ max_sclk = 75000;
+ }
++ } else if (adev->asic_type == CHIP_OLAND) {
++ if ((adev->pdev->device == 0x6604) &&
++ (adev->pdev->subsystem_vendor == 0x1028) &&
++ (adev->pdev->subsystem_device == 0x066F)) {
++ max_sclk = 75000;
++ }
+ }
+ /* Apply dpm quirks */
+ while (p && p->chip_device != 0) {
+diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
+index 11e13c56126f..2da3ff650e1d 100644
+--- a/drivers/isdn/gigaset/bas-gigaset.c
++++ b/drivers/isdn/gigaset/bas-gigaset.c
+@@ -2317,6 +2317,9 @@ static int gigaset_probe(struct usb_interface *interface,
+ return -ENODEV;
+ }
+
++ if (hostif->desc.bNumEndpoints < 1)
++ return -ENODEV;
++
+ dev_info(&udev->dev,
+ "%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n",
+ __func__, le16_to_cpu(udev->descriptor.idVendor),
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 1920756828df..87f14080c2cd 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1571,7 +1571,25 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
+ split = bio;
+ }
+
++ /*
+ * If a bio is split, the first part of the bio will pass the
+ * barrier but the bio is queued in current->bio_list (see
+ * generic_make_request). If there is a raise_barrier() called
+ * here, the second part of the bio can't pass the barrier. But
+ * since the first part of the bio isn't dispatched to the
+ * underlying disks yet, the barrier is never released, hence
+ * raise_barrier will always wait. We have a deadlock.
+ * Note, this only happens in the read path. For the write path,
+ * the first part of the bio is dispatched in a schedule() call
+ * (because of the blk plug) or offloaded to raid10d.
+ * Quitting from the function immediately can change the bio
+ * order queued in bio_list and avoid the deadlock.
++ */
+ __make_request(mddev, split);
++ if (split != bio && bio_data_dir(bio) == READ) {
++ generic_make_request(bio);
++ break;
++ }
+ } while (split != bio);
+
+ /* In case raid10d snuck in to freeze_array */
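+
+The read-path fix above, reduced to a schematic (handle_first_part() stands in
+for __make_request(); this is not a verbatim excerpt):
+
+    /* Schematic: bio_split() peels off the front of the bio. Handing the
+     * remainder back to generic_make_request() queues it on
+     * current->bio_list instead of looping here, so the barrier held by
+     * the first part can drain and the deadlock described above is
+     * avoided. */
+    do {
+            if (bio_sectors(bio) > max_sectors) {
+                    split = bio_split(bio, max_sectors, GFP_NOIO, fs_bio_set);
+                    bio_chain(split, bio);
+            } else {
+                    split = bio;
+            }
+
+            handle_first_part(mddev, split);
+            if (split != bio && bio_data_dir(bio) == READ) {
+                    generic_make_request(bio);  /* requeue the tail */
+                    break;
+            }
+    } while (split != bio);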
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 3c7e106c12a2..6661db2c85f0 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -1364,7 +1364,8 @@ static int set_syndrome_sources(struct page **srcs,
+ (test_bit(R5_Wantdrain, &dev->flags) ||
+ test_bit(R5_InJournal, &dev->flags))) ||
+ (srctype == SYNDROME_SRC_WRITTEN &&
+- dev->written)) {
++ (dev->written ||
++ test_bit(R5_InJournal, &dev->flags)))) {
+ if (test_bit(R5_InJournal, &dev->flags))
+ srcs[slot] = sh->dev[i].orig_page;
+ else
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index f9b6fba689ff..a530f08592cd 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -560,8 +560,12 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
+ WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
+ task->state = state;
+
+- if (!list_empty(&task->running))
++ spin_lock_bh(&conn->taskqueuelock);
++ if (!list_empty(&task->running)) {
++ pr_debug_once("%s while task on list", __func__);
+ list_del_init(&task->running);
++ }
++ spin_unlock_bh(&conn->taskqueuelock);
+
+ if (conn->task == task)
+ conn->task = NULL;
+@@ -783,7 +787,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ if (session->tt->xmit_task(task))
+ goto free_task;
+ } else {
++ spin_lock_bh(&conn->taskqueuelock);
+ list_add_tail(&task->running, &conn->mgmtqueue);
++ spin_unlock_bh(&conn->taskqueuelock);
+ iscsi_conn_queue_work(conn);
+ }
+
+@@ -1474,8 +1480,10 @@ void iscsi_requeue_task(struct iscsi_task *task)
+ * this may be on the requeue list already if the xmit_task callout
+ * is handling the r2ts while we are adding new ones
+ */
++ spin_lock_bh(&conn->taskqueuelock);
+ if (list_empty(&task->running))
+ list_add_tail(&task->running, &conn->requeue);
++ spin_unlock_bh(&conn->taskqueuelock);
+ iscsi_conn_queue_work(conn);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_requeue_task);
+@@ -1512,22 +1520,26 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
+ * only have one nop-out as a ping from us and targets should not
+ * overflow us with nop-ins
+ */
++ spin_lock_bh(&conn->taskqueuelock);
+ check_mgmt:
+ while (!list_empty(&conn->mgmtqueue)) {
+ conn->task = list_entry(conn->mgmtqueue.next,
+ struct iscsi_task, running);
+ list_del_init(&conn->task->running);
++ spin_unlock_bh(&conn->taskqueuelock);
+ if (iscsi_prep_mgmt_task(conn, conn->task)) {
+ /* regular RX path uses back_lock */
+ spin_lock_bh(&conn->session->back_lock);
+ __iscsi_put_task(conn->task);
+ spin_unlock_bh(&conn->session->back_lock);
+ conn->task = NULL;
++ spin_lock_bh(&conn->taskqueuelock);
+ continue;
+ }
+ rc = iscsi_xmit_task(conn);
+ if (rc)
+ goto done;
++ spin_lock_bh(&conn->taskqueuelock);
+ }
+
+ /* process pending command queue */
+@@ -1535,19 +1547,24 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
+ conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task,
+ running);
+ list_del_init(&conn->task->running);
++ spin_unlock_bh(&conn->taskqueuelock);
+ if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
+ fail_scsi_task(conn->task, DID_IMM_RETRY);
++ spin_lock_bh(&conn->taskqueuelock);
+ continue;
+ }
+ rc = iscsi_prep_scsi_cmd_pdu(conn->task);
+ if (rc) {
+ if (rc == -ENOMEM || rc == -EACCES) {
++ spin_lock_bh(&conn->taskqueuelock);
+ list_add_tail(&conn->task->running,
+ &conn->cmdqueue);
+ conn->task = NULL;
++ spin_unlock_bh(&conn->taskqueuelock);
+ goto done;
+ } else
+ fail_scsi_task(conn->task, DID_ABORT);
++ spin_lock_bh(&conn->taskqueuelock);
+ continue;
+ }
+ rc = iscsi_xmit_task(conn);
+@@ -1558,6 +1575,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
+ * we need to check the mgmt queue for nops that need to
+ * be sent to avoid starvation
+ */
++ spin_lock_bh(&conn->taskqueuelock);
+ if (!list_empty(&conn->mgmtqueue))
+ goto check_mgmt;
+ }
+@@ -1577,12 +1595,15 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
+ conn->task = task;
+ list_del_init(&conn->task->running);
+ conn->task->state = ISCSI_TASK_RUNNING;
++ spin_unlock_bh(&conn->taskqueuelock);
+ rc = iscsi_xmit_task(conn);
+ if (rc)
+ goto done;
++ spin_lock_bh(&conn->taskqueuelock);
+ if (!list_empty(&conn->mgmtqueue))
+ goto check_mgmt;
+ }
++ spin_unlock_bh(&conn->taskqueuelock);
+ spin_unlock_bh(&conn->session->frwd_lock);
+ return -ENODATA;
+
+@@ -1738,7 +1759,9 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
+ goto prepd_reject;
+ }
+ } else {
++ spin_lock_bh(&conn->taskqueuelock);
+ list_add_tail(&task->running, &conn->cmdqueue);
++ spin_unlock_bh(&conn->taskqueuelock);
+ iscsi_conn_queue_work(conn);
+ }
+
+@@ -2897,6 +2920,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
+ INIT_LIST_HEAD(&conn->mgmtqueue);
+ INIT_LIST_HEAD(&conn->cmdqueue);
+ INIT_LIST_HEAD(&conn->requeue);
++ spin_lock_init(&conn->taskqueuelock);
+ INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
+
+ /* allocate login_task used for the login/text sequences */
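+
+The locking pattern the hunks above introduce, condensed (process_task() is a
+stand-in for the prep + xmit steps, not a real function):
+
+    /* Illustrative shape: taskqueuelock guards only the three list heads
+     * and is dropped around work that may sleep or take other locks. */
+    spin_lock_bh(&conn->taskqueuelock);
+    while (!list_empty(&conn->cmdqueue)) {
+            task = list_first_entry(&conn->cmdqueue,
+                                    struct iscsi_task, running);
+            list_del_init(&task->running);
+            spin_unlock_bh(&conn->taskqueuelock);
+
+            process_task(conn, task);       /* may sleep; lock not held */
+
+            spin_lock_bh(&conn->taskqueuelock);
+    }
+    spin_unlock_bh(&conn->taskqueuelock);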
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index 4776fd85514f..10f75ad2b9e8 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -11447,6 +11447,7 @@ static struct pci_driver lpfc_driver = {
+ .id_table = lpfc_id_table,
+ .probe = lpfc_pci_probe_one,
+ .remove = lpfc_pci_remove_one,
++ .shutdown = lpfc_pci_remove_one,
+ .suspend = lpfc_pci_suspend_one,
+ .resume = lpfc_pci_resume_one,
+ .err_handler = &lpfc_err_handler,
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
+index dcb33f4fa687..20492e057da3 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
+@@ -1443,9 +1443,6 @@ void mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address, u16 handle, u8 phy_number, u8 link_rate);
+ extern struct sas_function_template mpt3sas_transport_functions;
+ extern struct scsi_transport_template *mpt3sas_transport_template;
+-extern int scsi_internal_device_block(struct scsi_device *sdev);
+-extern int scsi_internal_device_unblock(struct scsi_device *sdev,
+- enum scsi_device_state new_state);
+ /* trigger data externs */
+ void mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data);
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+index 0b5b423b1db0..245fbe2f1696 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+@@ -2840,7 +2840,7 @@ _scsih_internal_device_block(struct scsi_device *sdev,
+ sas_device_priv_data->sas_target->handle);
+ sas_device_priv_data->block = 1;
+
+- r = scsi_internal_device_block(sdev);
++ r = scsi_internal_device_block(sdev, false);
+ if (r == -EINVAL)
+ sdev_printk(KERN_WARNING, sdev,
+ "device_block failed with return(%d) for handle(0x%04x)\n",
+@@ -2876,7 +2876,7 @@ _scsih_internal_device_unblock(struct scsi_device *sdev,
+ "performing a block followed by an unblock\n",
+ r, sas_device_priv_data->sas_target->handle);
+ sas_device_priv_data->block = 1;
+- r = scsi_internal_device_block(sdev);
++ r = scsi_internal_device_block(sdev, false);
+ if (r)
+ sdev_printk(KERN_WARNING, sdev, "retried device_block "
+ "failed with return(%d) for handle(0x%04x)\n",
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index e4fda84b959e..26fe9cb3a963 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -5372,16 +5372,22 @@ qlt_send_busy(struct scsi_qla_host *vha,
+
+ static int
+ qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
+- struct atio_from_isp *atio)
++ struct atio_from_isp *atio, bool ha_locked)
+ {
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t status;
++ unsigned long flags;
+
+ if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
+ return 0;
+
++ if (!ha_locked)
++ spin_lock_irqsave(&ha->hardware_lock, flags);
+ status = temp_sam_status;
+ qlt_send_busy(vha, atio, status);
++ if (!ha_locked)
++ spin_unlock_irqrestore(&ha->hardware_lock, flags);
++
+ return 1;
+ }
+
+@@ -5426,7 +5432,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
+
+
+ if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
+- rc = qlt_chk_qfull_thresh_hold(vha, atio);
++ rc = qlt_chk_qfull_thresh_hold(vha, atio, ha_locked);
+ if (rc != 0) {
+ tgt->atio_irq_cmd_count--;
+ return;
+@@ -5549,7 +5555,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
+ break;
+ }
+
+- rc = qlt_chk_qfull_thresh_hold(vha, atio);
++ rc = qlt_chk_qfull_thresh_hold(vha, atio, true);
+ if (rc != 0) {
+ tgt->irq_cmd_count--;
+ return;
+@@ -6815,6 +6821,8 @@ qlt_handle_abts_recv_work(struct work_struct *work)
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qlt_response_pkt_all_vps(vha, (response_t *)&op->atio);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
++
++ kfree(op);
+ }
+
+ void
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index f16221b66668..d438430c49a2 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -2880,6 +2880,8 @@ EXPORT_SYMBOL(scsi_target_resume);
+ /**
+ * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
+ * @sdev: device to block
++ * @wait: Whether or not to wait until ongoing .queuecommand() /
++ * .queue_rq() calls have finished.
+ *
+ * Block request made by scsi lld's to temporarily stop all
+ * scsi commands on the specified device. May sleep.
+@@ -2897,7 +2899,7 @@ EXPORT_SYMBOL(scsi_target_resume);
+ * remove the rport mutex lock and unlock calls from srp_queuecommand().
+ */
+ int
+-scsi_internal_device_block(struct scsi_device *sdev)
++scsi_internal_device_block(struct scsi_device *sdev, bool wait)
+ {
+ struct request_queue *q = sdev->request_queue;
+ unsigned long flags;
+@@ -2917,12 +2919,16 @@ scsi_internal_device_block(struct scsi_device *sdev)
+ * request queue.
+ */
+ if (q->mq_ops) {
+- blk_mq_quiesce_queue(q);
++ if (wait)
++ blk_mq_quiesce_queue(q);
++ else
++ blk_mq_stop_hw_queues(q);
+ } else {
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_stop_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+- scsi_wait_for_queuecommand(sdev);
++ if (wait)
++ scsi_wait_for_queuecommand(sdev);
+ }
+
+ return 0;
+@@ -2984,7 +2990,7 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
+ static void
+ device_block(struct scsi_device *sdev, void *data)
+ {
+- scsi_internal_device_block(sdev);
++ scsi_internal_device_block(sdev, true);
+ }
+
+ static int
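+
+A usage sketch for the new wait parameter (call sites invented for
+illustration):
+
+    /* wait == true: may sleep until in-flight .queuecommand()/.queue_rq()
+     * calls drain (blk_mq_quiesce_queue / scsi_wait_for_queuecommand).
+     * wait == false: only stop the queues; usable where waiting for the
+     * drain is not allowed, as in the mpt3sas callers above. */
+    scsi_internal_device_block(sdev, true);   /* e.g. the device_block() path */
+    scsi_internal_device_block(sdev, false);  /* e.g. mpt3sas internal block */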
+diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
+index 193636a59adf..9811f82b9d0c 100644
+--- a/drivers/scsi/scsi_priv.h
++++ b/drivers/scsi/scsi_priv.h
+@@ -189,8 +189,5 @@ static inline void scsi_dh_remove_device(struct scsi_device *sdev) { }
+ */
+
+ #define SCSI_DEVICE_BLOCK_MAX_TIMEOUT 600 /* units in seconds */
+-extern int scsi_internal_device_block(struct scsi_device *sdev);
+-extern int scsi_internal_device_unblock(struct scsi_device *sdev,
+- enum scsi_device_state new_state);
+
+ #endif /* _SCSI_PRIV_H */
+diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
+index 04d7aa7390d0..3e677866bb4f 100644
+--- a/drivers/target/target_core_pscsi.c
++++ b/drivers/target/target_core_pscsi.c
+@@ -154,7 +154,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
+
+ buf = kzalloc(12, GFP_KERNEL);
+ if (!buf)
+- return;
++ goto out_free;
+
+ memset(cdb, 0, MAX_COMMAND_SIZE);
+ cdb[0] = MODE_SENSE;
+@@ -169,9 +169,10 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
+ * If MODE_SENSE still returns zero, set the default value to 1024.
+ */
+ sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
++out_free:
+ if (!sdev->sector_size)
+ sdev->sector_size = 1024;
+-out_free:
++
+ kfree(buf);
+ }
+
+@@ -314,9 +315,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
+ sd->lun, sd->queue_depth);
+ }
+
+- dev->dev_attrib.hw_block_size = sd->sector_size;
++ dev->dev_attrib.hw_block_size =
++ min_not_zero((int)sd->sector_size, 512);
+ dev->dev_attrib.hw_max_sectors =
+- min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
++ min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q));
+ dev->dev_attrib.hw_queue_depth = sd->queue_depth;
+
+ /*
+@@ -339,8 +341,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
+ /*
+ * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
+ */
+- if (sd->type == TYPE_TAPE)
++ if (sd->type == TYPE_TAPE) {
+ pscsi_tape_read_blocksize(dev, sd);
++ dev->dev_attrib.hw_block_size = sd->sector_size;
++ }
+ return 0;
+ }
+
+@@ -406,7 +410,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
+ /*
+ * Called with struct Scsi_Host->host_lock called.
+ */
+-static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
++static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd)
+ __releases(sh->host_lock)
+ {
+ struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
+@@ -433,28 +437,6 @@ static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
+ return 0;
+ }
+
+-/*
+- * Called with struct Scsi_Host->host_lock called.
+- */
+-static int pscsi_create_type_other(struct se_device *dev,
+- struct scsi_device *sd)
+- __releases(sh->host_lock)
+-{
+- struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
+- struct Scsi_Host *sh = sd->host;
+- int ret;
+-
+- spin_unlock_irq(sh->host_lock);
+- ret = pscsi_add_device_to_list(dev, sd);
+- if (ret)
+- return ret;
+-
+- pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n",
+- phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
+- sd->channel, sd->id, sd->lun);
+- return 0;
+-}
+-
+ static int pscsi_configure_device(struct se_device *dev)
+ {
+ struct se_hba *hba = dev->se_hba;
+@@ -542,11 +524,8 @@ static int pscsi_configure_device(struct se_device *dev)
+ case TYPE_DISK:
+ ret = pscsi_create_type_disk(dev, sd);
+ break;
+- case TYPE_ROM:
+- ret = pscsi_create_type_rom(dev, sd);
+- break;
+ default:
+- ret = pscsi_create_type_other(dev, sd);
++ ret = pscsi_create_type_nondisk(dev, sd);
+ break;
+ }
+
+@@ -611,8 +590,7 @@ static void pscsi_free_device(struct se_device *dev)
+ else if (pdv->pdv_lld_host)
+ scsi_host_put(pdv->pdv_lld_host);
+
+- if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
+- scsi_device_put(sd);
++ scsi_device_put(sd);
+
+ pdv->pdv_sd = NULL;
+ }
+@@ -1065,7 +1043,6 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
+ if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
+ return pdv->pdv_bd->bd_part->nr_sects;
+
+- dump_stack();
+ return 0;
+ }
+
+diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
+index df7b6e95c019..6ec5dded4ae0 100644
+--- a/drivers/target/target_core_sbc.c
++++ b/drivers/target/target_core_sbc.c
+@@ -1105,9 +1105,15 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
+ return ret;
+ break;
+ case VERIFY:
++ case VERIFY_16:
+ size = 0;
+- sectors = transport_get_sectors_10(cdb);
+- cmd->t_task_lba = transport_lba_32(cdb);
++ if (cdb[0] == VERIFY) {
++ sectors = transport_get_sectors_10(cdb);
++ cmd->t_task_lba = transport_lba_32(cdb);
++ } else {
++ sectors = transport_get_sectors_16(cdb);
++ cmd->t_task_lba = transport_lba_64(cdb);
++ }
+ cmd->execute_cmd = sbc_emulate_noop;
+ goto check_lba;
+ case REZERO_UNIT:
+diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
+index a6a3389a07fc..51519c2836b5 100644
+--- a/fs/gfs2/incore.h
++++ b/fs/gfs2/incore.h
+@@ -207,7 +207,7 @@ struct lm_lockname {
+ struct gfs2_sbd *ln_sbd;
+ u64 ln_number;
+ unsigned int ln_type;
+-};
++} __packed __aligned(sizeof(int));
+
+ #define lm_name_equal(name1, name2) \
+ (((name1)->ln_number == (name2)->ln_number) && \
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 37bcd887f742..0a436c4a28ad 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -7541,11 +7541,11 @@ static void nfs4_exchange_id_release(void *data)
+ struct nfs41_exchange_id_data *cdata =
+ (struct nfs41_exchange_id_data *)data;
+
+- nfs_put_client(cdata->args.client);
+ if (cdata->xprt) {
+ xprt_put(cdata->xprt);
+ rpc_clnt_xprt_switch_put(cdata->args.client->cl_rpcclient);
+ }
++ nfs_put_client(cdata->args.client);
+ kfree(cdata->res.impl_id);
+ kfree(cdata->res.server_scope);
+ kfree(cdata->res.server_owner);
+@@ -7652,10 +7652,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
+ task_setup_data.callback_data = calldata;
+
+ task = rpc_run_task(&task_setup_data);
+- if (IS_ERR(task)) {
+- status = PTR_ERR(task);
+- goto out_impl_id;
+- }
++ if (IS_ERR(task))
++ return PTR_ERR(task);
+
+ if (!xprt) {
+ status = rpc_wait_for_completion_task(task);
+@@ -7683,6 +7681,7 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
+ kfree(calldata->res.server_owner);
+ out_calldata:
+ kfree(calldata);
++ nfs_put_client(clp);
+ goto out;
+ }
+
+diff --git a/include/linux/log2.h b/include/linux/log2.h
+index ef3d4f67118c..c373295f359f 100644
+--- a/include/linux/log2.h
++++ b/include/linux/log2.h
+@@ -16,12 +16,6 @@
+ #include <linux/bitops.h>
+
+ /*
+- * deal with unrepresentable constant logarithms
+- */
+-extern __attribute__((const, noreturn))
+-int ____ilog2_NaN(void);
+-
+-/*
+ * non-constant log of base 2 calculators
+ * - the arch may override these in asm/bitops.h if they can be implemented
+ * more efficiently than using fls() and fls64()
+@@ -85,7 +79,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
+ #define ilog2(n) \
+ ( \
+ __builtin_constant_p(n) ? ( \
+- (n) < 1 ? ____ilog2_NaN() : \
++ (n) < 2 ? 0 : \
+ (n) & (1ULL << 63) ? 63 : \
+ (n) & (1ULL << 62) ? 62 : \
+ (n) & (1ULL << 61) ? 61 : \
+@@ -148,10 +142,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
+ (n) & (1ULL << 4) ? 4 : \
+ (n) & (1ULL << 3) ? 3 : \
+ (n) & (1ULL << 2) ? 2 : \
+- (n) & (1ULL << 1) ? 1 : \
+- (n) & (1ULL << 0) ? 0 : \
+- ____ilog2_NaN() \
+- ) : \
++ 1 ) : \
+ (sizeof(n) <= 4) ? \
+ __ilog2_u32(n) : \
+ __ilog2_u64(n) \
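+
+The effect on constant folding, as a worked example (the values follow
+directly from the macro above):
+
+    /* For constant n: ilog2(1) == 0, ilog2(2) == 1, ilog2(3) == 1,
+     * ilog2(4096) == 12. ilog2(0) remains meaningless, but a constant 0
+     * now folds to 0 instead of referencing the removed ____ilog2_NaN()
+     * and failing at link time. */
+    BUILD_BUG_ON(ilog2(4096) != 12);   /* compile-time check only */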
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index 4d1c46aac331..c7b1dc713cdd 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -196,6 +196,7 @@ struct iscsi_conn {
+ struct iscsi_task *task; /* xmit task in progress */
+
+ /* xmit */
++ spinlock_t taskqueuelock; /* protects the next three lists */
+ struct list_head mgmtqueue; /* mgmt (control) xmit queue */
+ struct list_head cmdqueue; /* data-path cmd queue */
+ struct list_head requeue; /* tasks needing another run */
+diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
+index be41c76ddd48..59ed779bbd9a 100644
+--- a/include/scsi/scsi_device.h
++++ b/include/scsi/scsi_device.h
+@@ -475,6 +475,10 @@ static inline int scsi_device_created(struct scsi_device *sdev)
+ sdev->sdev_state == SDEV_CREATED_BLOCK;
+ }
+
++int scsi_internal_device_block(struct scsi_device *sdev, bool wait);
++int scsi_internal_device_unblock(struct scsi_device *sdev,
++ enum scsi_device_state new_state);
++
+ /* accessor functions for the SCSI parameters */
+ static inline int scsi_device_sync(struct scsi_device *sdev)
+ {
+diff --git a/kernel/cgroup_pids.c b/kernel/cgroup_pids.c
+index 2bd673783f1a..a57242e0d5a6 100644
+--- a/kernel/cgroup_pids.c
++++ b/kernel/cgroup_pids.c
+@@ -229,7 +229,7 @@ static int pids_can_fork(struct task_struct *task)
+ /* Only log the first time events_limit is incremented. */
+ if (atomic64_inc_return(&pids->events_limit) == 1) {
+ pr_info("cgroup: fork rejected by pids controller in ");
+- pr_cont_cgroup_path(task_cgroup(current, pids_cgrp_id));
++ pr_cont_cgroup_path(css->cgroup);
+ pr_cont("\n");
+ }
+ cgroup_file_notify(&pids->events_file);
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index e235bb991bdd..8113654a815a 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -10374,6 +10374,17 @@ void perf_event_free_task(struct task_struct *task)
+ continue;
+
+ mutex_lock(&ctx->mutex);
++ raw_spin_lock_irq(&ctx->lock);
++ /*
++ * Destroy the task <-> ctx relation and mark the context dead.
++ *
++ * This is important because even though the task hasn't been
++ * exposed yet the context has been (through child_list).
++ */
++ RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL);
++ WRITE_ONCE(ctx->task, TASK_TOMBSTONE);
++ put_task_struct(task); /* cannot be last */
++ raw_spin_unlock_irq(&ctx->lock);
+ again:
+ list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
+ group_entry)
+@@ -10627,7 +10638,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
+ ret = inherit_task_group(event, parent, parent_ctx,
+ child, ctxn, &inherited_all);
+ if (ret)
+- break;
++ goto out_unlock;
+ }
+
+ /*
+@@ -10643,7 +10654,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
+ ret = inherit_task_group(event, parent, parent_ctx,
+ child, ctxn, &inherited_all);
+ if (ret)
+- break;
++ goto out_unlock;
+ }
+
+ raw_spin_lock_irqsave(&parent_ctx->lock, flags);
+@@ -10671,6 +10682,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
+ }
+
+ raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
++out_unlock:
+ mutex_unlock(&parent_ctx->mutex);
+
+ perf_unpin_context(parent_ctx);
+diff --git a/mm/percpu.c b/mm/percpu.c
+index 0686f566d347..232356a2d914 100644
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -1011,8 +1011,11 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
+ mutex_unlock(&pcpu_alloc_mutex);
+ }
+
+- if (chunk != pcpu_reserved_chunk)
++ if (chunk != pcpu_reserved_chunk) {
++ spin_lock_irqsave(&pcpu_lock, flags);
+ pcpu_nr_empty_pop_pages -= occ_pages;
++ spin_unlock_irqrestore(&pcpu_lock, flags);
++ }
+
+ if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
+ pcpu_schedule_balance_work();
+diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
+index 61d16c39e92c..6e27aab79f76 100644
+--- a/net/sunrpc/xprtrdma/verbs.c
++++ b/net/sunrpc/xprtrdma/verbs.c
+@@ -495,7 +495,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
+ struct ib_cq *sendcq, *recvcq;
+ int rc;
+
+- max_sge = min(ia->ri_device->attrs.max_sge, RPCRDMA_MAX_SEND_SGES);
++ max_sge = min_t(unsigned int, ia->ri_device->attrs.max_sge,
++ RPCRDMA_MAX_SEND_SGES);
+ if (max_sge < RPCRDMA_MIN_SEND_SGES) {
+ pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
+ return -ENOMEM;
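+
+min_t() pins the comparison above to one type; a minimal illustration of what
+it does (values invented):
+
+    int attr = 32;        /* ia->ri_device->attrs.max_sge is a plain int */
+    unsigned int cap = 8; /* stand-in for an unsigned SGE cap */
+
+    /* The kernel's min() rejects mixed-type comparisons at build time;
+     * min_t(unsigned int, ...) casts both sides first. */
+    unsigned int max_sge = min_t(unsigned int, attr, cap);   /* == 8 */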
+diff --git a/tools/include/linux/log2.h b/tools/include/linux/log2.h
+index 41446668ccce..d5677d39c1e4 100644
+--- a/tools/include/linux/log2.h
++++ b/tools/include/linux/log2.h
+@@ -13,12 +13,6 @@
+ #define _TOOLS_LINUX_LOG2_H
+
+ /*
+- * deal with unrepresentable constant logarithms
+- */
+-extern __attribute__((const, noreturn))
+-int ____ilog2_NaN(void);
+-
+-/*
+ * non-constant log of base 2 calculators
+ * - the arch may override these in asm/bitops.h if they can be implemented
+ * more efficiently than using fls() and fls64()
+@@ -78,7 +72,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
+ #define ilog2(n) \
+ ( \
+ __builtin_constant_p(n) ? ( \
+- (n) < 1 ? ____ilog2_NaN() : \
++ (n) < 2 ? 0 : \
+ (n) & (1ULL << 63) ? 63 : \
+ (n) & (1ULL << 62) ? 62 : \
+ (n) & (1ULL << 61) ? 61 : \
+@@ -141,10 +135,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
+ (n) & (1ULL << 4) ? 4 : \
+ (n) & (1ULL << 3) ? 3 : \
+ (n) & (1ULL << 2) ? 2 : \
+- (n) & (1ULL << 1) ? 1 : \
+- (n) & (1ULL << 0) ? 0 : \
+- ____ilog2_NaN() \
+- ) : \
++ 1 ) : \
+ (sizeof(n) <= 4) ? \
+ __ilog2_u32(n) : \
+ __ilog2_u64(n) \