author      Mike Pagano <mpagano@gentoo.org>    2019-02-15 07:45:43 -0500
committer   Mike Pagano <mpagano@gentoo.org>    2019-02-15 07:45:43 -0500
commit      28c7eceb0c1de020e71f9f62252291c7753fa792 (patch)
tree        9b032bf5648b66742ee5e3bd5a8cc455e6b92387
parent      proj/linux-patches: Linux patch 4.9.156 (diff)
download    linux-patches-28c7eceb.tar.gz
            linux-patches-28c7eceb.tar.bz2
            linux-patches-28c7eceb.zip
proj/linux-patches: Linux patch 4.9.157 and 4.9.158 (tag: 4.9-160)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README               |   8
-rw-r--r--  1156_linux-4.9.157.patch  | 889
-rw-r--r--  1157_linux-4.9.158.patch  |  34
3 files changed, 931 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index dc5a410d..0d0c627b 100644
--- a/0000_README
+++ b/0000_README
@@ -667,6 +667,14 @@ Patch: 1155_linux-4.9.156.patch
From: http://www.kernel.org
Desc: Linux 4.9.156
+Patch: 1156_linux-4.9.157.patch
+From: http://www.kernel.org
+Desc: Linux 4.9.157
+
+Patch: 1157_linux-4.9.158.patch
+From: http://www.kernel.org
+Desc: Linux 4.9.158
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1156_linux-4.9.157.patch b/1156_linux-4.9.157.patch
new file mode 100644
index 00000000..92b7e3e8
--- /dev/null
+++ b/1156_linux-4.9.157.patch
@@ -0,0 +1,889 @@
+diff --git a/Makefile b/Makefile
+index 956923115f7e..4eb7a17e18f1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 156
++SUBLEVEL = 157
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+
+diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c
+index c1cd80ecc219..a904244264ce 100644
+--- a/arch/arm/mach-iop32x/n2100.c
++++ b/arch/arm/mach-iop32x/n2100.c
+@@ -75,8 +75,7 @@ void __init n2100_map_io(void)
+ /*
+ * N2100 PCI.
+ */
+-static int __init
+-n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
++static int n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+ {
+ int irq;
+
+diff --git a/arch/arm/mach-tango/pm.c b/arch/arm/mach-tango/pm.c
+index b05c6d6f99d0..08d813234b2d 100644
+--- a/arch/arm/mach-tango/pm.c
++++ b/arch/arm/mach-tango/pm.c
+@@ -2,6 +2,7 @@
+ #include <linux/suspend.h>
+ #include <asm/suspend.h>
+ #include "smc.h"
++#include "pm.h"
+
+ static int tango_pm_powerdown(unsigned long arg)
+ {
+@@ -23,10 +24,7 @@ static const struct platform_suspend_ops tango_pm_ops = {
+ .valid = suspend_valid_only_mem,
+ };
+
+-static int __init tango_pm_init(void)
++void __init tango_pm_init(void)
+ {
+ suspend_set_ops(&tango_pm_ops);
+- return 0;
+ }
+-
+-late_initcall(tango_pm_init);
+diff --git a/arch/arm/mach-tango/pm.h b/arch/arm/mach-tango/pm.h
+new file mode 100644
+index 000000000000..35ea705a0ee2
+--- /dev/null
++++ b/arch/arm/mach-tango/pm.h
+@@ -0,0 +1,7 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++
++#ifdef CONFIG_SUSPEND
++void __init tango_pm_init(void);
++#else
++#define tango_pm_init NULL
++#endif
+diff --git a/arch/arm/mach-tango/setup.c b/arch/arm/mach-tango/setup.c
+index f14b6c7d255b..2b48e1098ea3 100644
+--- a/arch/arm/mach-tango/setup.c
++++ b/arch/arm/mach-tango/setup.c
+@@ -1,6 +1,7 @@
+ #include <asm/mach/arch.h>
+ #include <asm/hardware/cache-l2x0.h>
+ #include "smc.h"
++#include "pm.h"
+
+ static void tango_l2c_write(unsigned long val, unsigned int reg)
+ {
+@@ -14,4 +15,5 @@ DT_MACHINE_START(TANGO_DT, "Sigma Tango DT")
+ .dt_compat = tango_dt_compat,
+ .l2c_aux_mask = ~0,
+ .l2c_write_sec = tango_l2c_write,
++ .init_late = tango_pm_init,
+ MACHINE_END
+diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
+index 659e6d3ae335..60177a612cb1 100644
+--- a/arch/mips/kernel/mips-cm.c
++++ b/arch/mips/kernel/mips-cm.c
+@@ -424,5 +424,5 @@ void mips_cm_error_report(void)
+ }
+
+ /* reprime cause register */
+- write_gcr_error_cause(0);
++ write_gcr_error_cause(cm_error);
+ }
+diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
+index 308d051fc45c..7c512834a8f1 100644
+--- a/arch/mips/pci/pci-octeon.c
++++ b/arch/mips/pci/pci-octeon.c
+@@ -573,6 +573,11 @@ static int __init octeon_pci_setup(void)
+ if (octeon_has_feature(OCTEON_FEATURE_PCIE))
+ return 0;
+
++ if (!octeon_is_pci_host()) {
++ pr_notice("Not in host mode, PCI Controller not initialized\n");
++ return 0;
++ }
++
+ /* Point pcibios_map_irq() to the PCI version of it */
+ octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq;
+
+@@ -584,11 +589,6 @@ static int __init octeon_pci_setup(void)
+ else
+ octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG;
+
+- if (!octeon_is_pci_host()) {
+- pr_notice("Not in host mode, PCI Controller not initialized\n");
+- return 0;
+- }
+-
+ /* PCI I/O and PCI MEM values */
+ set_io_port_base(OCTEON_PCI_IOSPACE_BASE);
+ ioport_resource.start = 0;
+diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
+index c3dc12a8b7d9..0b845cc7fbdc 100644
+--- a/arch/mips/vdso/Makefile
++++ b/arch/mips/vdso/Makefile
+@@ -116,7 +116,7 @@ $(obj)/%-o32.o: $(src)/%.c FORCE
+ $(call cmd,force_checksrc)
+ $(call if_changed_rule,cc_o_c)
+
+-$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := -mabi=32
++$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=32
+ $(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE
+ $(call if_changed_dep,cpp_lds_S)
+
+@@ -156,7 +156,7 @@ $(obj)/%-n32.o: $(src)/%.c FORCE
+ $(call cmd,force_checksrc)
+ $(call if_changed_rule,cc_o_c)
+
+-$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := -mabi=n32
++$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=n32
+ $(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE
+ $(call if_changed_dep,cpp_lds_S)
+
+diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
+index e14366de0e6e..97387cfbbeb5 100644
+--- a/drivers/gpu/drm/drm_modes.c
++++ b/drivers/gpu/drm/drm_modes.c
+@@ -753,7 +753,7 @@ int drm_mode_hsync(const struct drm_display_mode *mode)
+ if (mode->hsync)
+ return mode->hsync;
+
+- if (mode->htotal < 0)
++ if (mode->htotal <= 0)
+ return 0;
+
+ calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+index 29abd28c19b3..4b556e698f13 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -605,13 +605,16 @@ out_fixup:
+ static int vmw_dma_masks(struct vmw_private *dev_priv)
+ {
+ struct drm_device *dev = dev_priv->dev;
++ int ret = 0;
+
+- if (intel_iommu_enabled &&
++ ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
++ if (dev_priv->map_mode != vmw_dma_phys &&
+ (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
+ DRM_INFO("Restricting DMA addresses to 44 bits.\n");
+- return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
++ return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
+ }
+- return 0;
++
++ return ret;
+ }
+ #else
+ static int vmw_dma_masks(struct vmw_private *dev_priv)
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index 81f5a552e32f..9fe8eda7c859 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -3769,7 +3769,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
+ *p_fence = NULL;
+ }
+
+- return 0;
++ return ret;
+ }
+
+ /**
+diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
+index 29423691c105..d7179dd3c9ef 100644
+--- a/drivers/hid/hid-debug.c
++++ b/drivers/hid/hid-debug.c
+@@ -30,6 +30,7 @@
+
+ #include <linux/debugfs.h>
+ #include <linux/seq_file.h>
++#include <linux/kfifo.h>
+ #include <linux/sched.h>
+ #include <linux/export.h>
+ #include <linux/slab.h>
+@@ -455,7 +456,7 @@ static char *resolv_usage_page(unsigned page, struct seq_file *f) {
+ char *buf = NULL;
+
+ if (!f) {
+- buf = kzalloc(sizeof(char) * HID_DEBUG_BUFSIZE, GFP_ATOMIC);
++ buf = kzalloc(HID_DEBUG_BUFSIZE, GFP_ATOMIC);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+ }
+@@ -659,17 +660,12 @@ EXPORT_SYMBOL_GPL(hid_dump_device);
+ /* enqueue string to 'events' ring buffer */
+ void hid_debug_event(struct hid_device *hdev, char *buf)
+ {
+- unsigned i;
+ struct hid_debug_list *list;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdev->debug_list_lock, flags);
+- list_for_each_entry(list, &hdev->debug_list, node) {
+- for (i = 0; buf[i]; i++)
+- list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] =
+- buf[i];
+- list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE;
+- }
++ list_for_each_entry(list, &hdev->debug_list, node)
++ kfifo_in(&list->hid_debug_fifo, buf, strlen(buf));
+ spin_unlock_irqrestore(&hdev->debug_list_lock, flags);
+
+ wake_up_interruptible(&hdev->debug_wait);
+@@ -720,8 +716,7 @@ void hid_dump_input(struct hid_device *hdev, struct hid_usage *usage, __s32 valu
+ hid_debug_event(hdev, buf);
+
+ kfree(buf);
+- wake_up_interruptible(&hdev->debug_wait);
+-
++ wake_up_interruptible(&hdev->debug_wait);
+ }
+ EXPORT_SYMBOL_GPL(hid_dump_input);
+
+@@ -1086,8 +1081,8 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
+ goto out;
+ }
+
+- if (!(list->hid_debug_buf = kzalloc(sizeof(char) * HID_DEBUG_BUFSIZE, GFP_KERNEL))) {
+- err = -ENOMEM;
++ err = kfifo_alloc(&list->hid_debug_fifo, HID_DEBUG_FIFOSIZE, GFP_KERNEL);
++ if (err) {
+ kfree(list);
+ goto out;
+ }
+@@ -1107,77 +1102,57 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
+ {
+ struct hid_debug_list *list = file->private_data;
+- int ret = 0, len;
++ int ret = 0, copied;
+ DECLARE_WAITQUEUE(wait, current);
+
+ mutex_lock(&list->read_mutex);
+- while (ret == 0) {
+- if (list->head == list->tail) {
+- add_wait_queue(&list->hdev->debug_wait, &wait);
+- set_current_state(TASK_INTERRUPTIBLE);
+-
+- while (list->head == list->tail) {
+- if (file->f_flags & O_NONBLOCK) {
+- ret = -EAGAIN;
+- break;
+- }
+- if (signal_pending(current)) {
+- ret = -ERESTARTSYS;
+- break;
+- }
++ if (kfifo_is_empty(&list->hid_debug_fifo)) {
++ add_wait_queue(&list->hdev->debug_wait, &wait);
++ set_current_state(TASK_INTERRUPTIBLE);
++
++ while (kfifo_is_empty(&list->hid_debug_fifo)) {
++ if (file->f_flags & O_NONBLOCK) {
++ ret = -EAGAIN;
++ break;
++ }
+
+- if (!list->hdev || !list->hdev->debug) {
+- ret = -EIO;
+- set_current_state(TASK_RUNNING);
+- goto out;
+- }
++ if (signal_pending(current)) {
++ ret = -ERESTARTSYS;
++ break;
++ }
+
+- /* allow O_NONBLOCK from other threads */
+- mutex_unlock(&list->read_mutex);
+- schedule();
+- mutex_lock(&list->read_mutex);
+- set_current_state(TASK_INTERRUPTIBLE);
++ /* if list->hdev is NULL we cannot remove_wait_queue().
++ * if list->hdev->debug is 0 then hid_debug_unregister()
++ * was already called and list->hdev is being destroyed.
++ * if we add remove_wait_queue() here we can hit a race.
++ */
++ if (!list->hdev || !list->hdev->debug) {
++ ret = -EIO;
++ set_current_state(TASK_RUNNING);
++ goto out;
+ }
+
+- set_current_state(TASK_RUNNING);
+- remove_wait_queue(&list->hdev->debug_wait, &wait);
++ /* allow O_NONBLOCK from other threads */
++ mutex_unlock(&list->read_mutex);
++ schedule();
++ mutex_lock(&list->read_mutex);
++ set_current_state(TASK_INTERRUPTIBLE);
+ }
+
+- if (ret)
+- goto out;
++ __set_current_state(TASK_RUNNING);
++ remove_wait_queue(&list->hdev->debug_wait, &wait);
+
+- /* pass the ringbuffer contents to userspace */
+-copy_rest:
+- if (list->tail == list->head)
++ if (ret)
+ goto out;
+- if (list->tail > list->head) {
+- len = list->tail - list->head;
+- if (len > count)
+- len = count;
+-
+- if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) {
+- ret = -EFAULT;
+- goto out;
+- }
+- ret += len;
+- list->head += len;
+- } else {
+- len = HID_DEBUG_BUFSIZE - list->head;
+- if (len > count)
+- len = count;
+-
+- if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) {
+- ret = -EFAULT;
+- goto out;
+- }
+- list->head = 0;
+- ret += len;
+- count -= len;
+- if (count > 0)
+- goto copy_rest;
+- }
+-
+ }
++
++ /* pass the fifo content to userspace, locking is not needed with only
++ * one concurrent reader and one concurrent writer
++ */
++ ret = kfifo_to_user(&list->hid_debug_fifo, buffer, count, &copied);
++ if (ret)
++ goto out;
++ ret = copied;
+ out:
+ mutex_unlock(&list->read_mutex);
+ return ret;
+@@ -1188,7 +1163,7 @@ static unsigned int hid_debug_events_poll(struct file *file, poll_table *wait)
+ struct hid_debug_list *list = file->private_data;
+
+ poll_wait(file, &list->hdev->debug_wait, wait);
+- if (list->head != list->tail)
++ if (!kfifo_is_empty(&list->hid_debug_fifo))
+ return POLLIN | POLLRDNORM;
+ if (!list->hdev->debug)
+ return POLLERR | POLLHUP;
+@@ -1203,7 +1178,7 @@ static int hid_debug_events_release(struct inode *inode, struct file *file)
+ spin_lock_irqsave(&list->hdev->debug_list_lock, flags);
+ list_del(&list->node);
+ spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
+- kfree(list->hid_debug_buf);
++ kfifo_free(&list->hid_debug_fifo);
+ kfree(list);
+
+ return 0;
+@@ -1254,4 +1229,3 @@ void hid_debug_exit(void)
+ {
+ debugfs_remove_recursive(hid_debug_root);
+ }
+-
+diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
+index ef761a508630..dad2a8be6830 100644
+--- a/drivers/iio/chemical/atlas-ph-sensor.c
++++ b/drivers/iio/chemical/atlas-ph-sensor.c
+@@ -453,9 +453,8 @@ static int atlas_read_raw(struct iio_dev *indio_dev,
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_TEMP:
+- *val = 1; /* 0.01 */
+- *val2 = 100;
+- break;
++ *val = 10;
++ return IIO_VAL_INT;
+ case IIO_PH:
+ *val = 1; /* 0.001 */
+ *val2 = 1000;
+@@ -486,7 +485,7 @@ static int atlas_write_raw(struct iio_dev *indio_dev,
+ int val, int val2, long mask)
+ {
+ struct atlas_data *data = iio_priv(indio_dev);
+- __be32 reg = cpu_to_be32(val);
++ __be32 reg = cpu_to_be32(val / 10);
+
+ if (val2 != 0 || val < 0 || val > 20000)
+ return -EINVAL;
+diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c
+index c344483fa7d6..9f257c53e6d4 100644
+--- a/drivers/misc/vexpress-syscfg.c
++++ b/drivers/misc/vexpress-syscfg.c
+@@ -61,7 +61,7 @@ static int vexpress_syscfg_exec(struct vexpress_syscfg_func *func,
+ int tries;
+ long timeout;
+
+- if (WARN_ON(index > func->num_templates))
++ if (WARN_ON(index >= func->num_templates))
+ return -EINVAL;
+
+ command = readl(syscfg->base + SYS_CFGCTRL);
+diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+index 141bd70a49c2..b9509230ce4d 100644
+--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
++++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+@@ -168,9 +168,10 @@ int gpmi_init(struct gpmi_nand_data *this)
+
+ /*
+ * Reset BCH here, too. We got failures otherwise :(
+- * See later BCH reset for explanation of MX23 handling
++ * See later BCH reset for explanation of MX23 and MX28 handling
+ */
+- ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
++ ret = gpmi_reset_block(r->bch_regs,
++ GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
+ if (ret)
+ goto err_out;
+
+@@ -275,13 +276,11 @@ int bch_set_geometry(struct gpmi_nand_data *this)
+
+ /*
+ * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
+- * chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
+- * On the other hand, the MX28 needs the reset, because one case has been
+- * seen where the BCH produced ECC errors constantly after 10000
+- * consecutive reboots. The latter case has not been seen on the MX23
+- * yet, still we don't know if it could happen there as well.
++ * chip, otherwise it will lock up. So we skip resetting BCH on the MX23
++ * and MX28.
+ */
+- ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
++ ret = gpmi_reset_block(r->bch_regs,
++ GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
+ if (ret)
+ goto err_out;
+
+diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
+index 8bef27b8f85d..e7b478b49985 100644
+--- a/fs/cifs/Kconfig
++++ b/fs/cifs/Kconfig
+@@ -111,7 +111,7 @@ config CIFS_XATTR
+
+ config CIFS_POSIX
+ bool "CIFS POSIX Extensions"
+- depends on CIFS && CIFS_ALLOW_INSECURE_LEGACY && CIFS_XATTR
++ depends on CIFS_XATTR
+ help
+ Enabling this option will cause the cifs client to attempt to
+ negotiate a newer dialect with servers, such as Samba 3.0.5
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index 3d7de9f4f545..77e9cd7a0137 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -732,6 +732,13 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
+ struct dentry *dentry = NULL, *trap;
+ struct name_snapshot old_name;
+
++ if (IS_ERR(old_dir))
++ return old_dir;
++ if (IS_ERR(new_dir))
++ return new_dir;
++ if (IS_ERR_OR_NULL(old_dentry))
++ return old_dentry;
++
+ trap = lock_rename(new_dir, old_dir);
+ /* Source or destination directories don't exist? */
+ if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir))
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 12d780718b48..3656f87d11e3 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -1472,8 +1472,10 @@ free_session_slots(struct nfsd4_session *ses)
+ {
+ int i;
+
+- for (i = 0; i < ses->se_fchannel.maxreqs; i++)
++ for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
++ free_svc_cred(&ses->se_slots[i]->sl_cred);
+ kfree(ses->se_slots[i]);
++ }
+ }
+
+ /*
+@@ -2344,14 +2346,18 @@ nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
+
+ dprintk("--> %s slot %p\n", __func__, slot);
+
++ slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
+ slot->sl_opcnt = resp->opcnt;
+ slot->sl_status = resp->cstate.status;
++ free_svc_cred(&slot->sl_cred);
++ copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);
+
+- slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
+- if (nfsd4_not_cached(resp)) {
+- slot->sl_datalen = 0;
++ if (!nfsd4_cache_this(resp)) {
++ slot->sl_flags &= ~NFSD4_SLOT_CACHED;
+ return;
+ }
++ slot->sl_flags |= NFSD4_SLOT_CACHED;
++
+ base = resp->cstate.data_offset;
+ slot->sl_datalen = buf->len - base;
+ if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
+@@ -2378,8 +2384,16 @@ nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
+ op = &args->ops[resp->opcnt - 1];
+ nfsd4_encode_operation(resp, op);
+
+- /* Return nfserr_retry_uncached_rep in next operation. */
+- if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) {
++ if (slot->sl_flags & NFSD4_SLOT_CACHED)
++ return op->status;
++ if (args->opcnt == 1) {
++ /*
++ * The original operation wasn't a solo sequence--we
++ * always cache those--so this retry must not match the
++ * original:
++ */
++ op->status = nfserr_seq_false_retry;
++ } else {
+ op = &args->ops[resp->opcnt++];
+ op->status = nfserr_retry_uncached_rep;
+ nfsd4_encode_operation(resp, op);
+@@ -3039,6 +3053,34 @@ static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
+ return xb->len > session->se_fchannel.maxreq_sz;
+ }
+
++static bool replay_matches_cache(struct svc_rqst *rqstp,
++ struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
++{
++ struct nfsd4_compoundargs *argp = rqstp->rq_argp;
++
++ if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
++ (bool)seq->cachethis)
++ return false;
++ /*
++ * If there's an error then the reply can have fewer ops than
++ * the call. But if we cached a reply with *more* ops than the
++ * call you're sending us now, then this new call is clearly not
++ * really a replay of the old one:
++ */
++ if (slot->sl_opcnt < argp->opcnt)
++ return false;
++ /* This is the only check explicitly called by spec: */
++ if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
++ return false;
++ /*
++ * There may be more comparisons we could actually do, but the
++ * spec doesn't require us to catch every case where the calls
++ * don't match (that would require caching the call as well as
++ * the reply), so we don't bother.
++ */
++ return true;
++}
++
+ __be32
+ nfsd4_sequence(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate,
+@@ -3098,6 +3140,9 @@ nfsd4_sequence(struct svc_rqst *rqstp,
+ status = nfserr_seq_misordered;
+ if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
+ goto out_put_session;
++ status = nfserr_seq_false_retry;
++ if (!replay_matches_cache(rqstp, seq, slot))
++ goto out_put_session;
+ cstate->slot = slot;
+ cstate->session = session;
+ cstate->clp = clp;
+diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
+index 005c911b34ac..86aa92d200e1 100644
+--- a/fs/nfsd/state.h
++++ b/fs/nfsd/state.h
+@@ -169,11 +169,13 @@ static inline struct nfs4_delegation *delegstateid(struct nfs4_stid *s)
+ struct nfsd4_slot {
+ u32 sl_seqid;
+ __be32 sl_status;
++ struct svc_cred sl_cred;
+ u32 sl_datalen;
+ u16 sl_opcnt;
+ #define NFSD4_SLOT_INUSE (1 << 0)
+ #define NFSD4_SLOT_CACHETHIS (1 << 1)
+ #define NFSD4_SLOT_INITIALIZED (1 << 2)
++#define NFSD4_SLOT_CACHED (1 << 3)
+ u8 sl_flags;
+ char sl_data[];
+ };
+diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
+index 8fda4abdf3b1..448e74e32344 100644
+--- a/fs/nfsd/xdr4.h
++++ b/fs/nfsd/xdr4.h
+@@ -645,9 +645,18 @@ static inline bool nfsd4_is_solo_sequence(struct nfsd4_compoundres *resp)
+ return resp->opcnt == 1 && args->ops[0].opnum == OP_SEQUENCE;
+ }
+
+-static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp)
++/*
++ * The session reply cache only needs to cache replies that the client
++ * actually asked us to. But it's almost free for us to cache compounds
++ * consisting of only a SEQUENCE op, so we may as well cache those too.
++ * Also, the protocol doesn't give us a convenient response in the case
++ * of a replay of a solo SEQUENCE op that wasn't cached
++ * (RETRY_UNCACHED_REP can only be returned in the second op of a
++ * compound).
++ */
++static inline bool nfsd4_cache_this(struct nfsd4_compoundres *resp)
+ {
+- return !(resp->cstate.slot->sl_flags & NFSD4_SLOT_CACHETHIS)
++ return (resp->cstate.slot->sl_flags & NFSD4_SLOT_CACHETHIS)
+ || nfsd4_is_solo_sequence(resp);
+ }
+
+diff --git a/include/linux/hid-debug.h b/include/linux/hid-debug.h
+index 8663f216c563..2d6100edf204 100644
+--- a/include/linux/hid-debug.h
++++ b/include/linux/hid-debug.h
+@@ -24,7 +24,10 @@
+
+ #ifdef CONFIG_DEBUG_FS
+
++#include <linux/kfifo.h>
++
+ #define HID_DEBUG_BUFSIZE 512
++#define HID_DEBUG_FIFOSIZE 512
+
+ void hid_dump_input(struct hid_device *, struct hid_usage *, __s32);
+ void hid_dump_report(struct hid_device *, int , u8 *, int);
+@@ -37,11 +40,8 @@ void hid_debug_init(void);
+ void hid_debug_exit(void);
+ void hid_debug_event(struct hid_device *, char *);
+
+-
+ struct hid_debug_list {
+- char *hid_debug_buf;
+- int head;
+- int tail;
++ DECLARE_KFIFO_PTR(hid_debug_fifo, char);
+ struct fasync_struct *fasync;
+ struct hid_device *hdev;
+ struct list_head node;
+@@ -64,4 +64,3 @@ struct hid_debug_list {
+ #endif
+
+ #endif
+-
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 049929a5f4ce..798b8f495ae2 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -696,6 +696,48 @@ static inline bool si_fromuser(const struct siginfo *info)
+ (!is_si_special(info) && SI_FROMUSER(info));
+ }
+
++static int dequeue_synchronous_signal(siginfo_t *info)
++{
++ struct task_struct *tsk = current;
++ struct sigpending *pending = &tsk->pending;
++ struct sigqueue *q, *sync = NULL;
++
++ /*
++ * Might a synchronous signal be in the queue?
++ */
++ if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
++ return 0;
++
++ /*
++ * Return the first synchronous signal in the queue.
++ */
++ list_for_each_entry(q, &pending->list, list) {
++ /* Synchronous signals have a positive si_code */
++ if ((q->info.si_code > SI_USER) &&
++ (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
++ sync = q;
++ goto next;
++ }
++ }
++ return 0;
++next:
++ /*
++ * Check if there is another siginfo for the same signal.
++ */
++ list_for_each_entry_continue(q, &pending->list, list) {
++ if (q->info.si_signo == sync->info.si_signo)
++ goto still_pending;
++ }
++
++ sigdelset(&pending->signal, sync->info.si_signo);
++ recalc_sigpending();
++still_pending:
++ list_del_init(&sync->list);
++ copy_siginfo(info, &sync->info);
++ __sigqueue_free(sync);
++ return info->si_signo;
++}
++
+ /*
+ * called with RCU read lock from check_kill_permission()
+ */
+@@ -2198,6 +2240,11 @@ relock:
+ goto relock;
+ }
+
++ /* Has this task already been marked for death? */
++ ksig->info.si_signo = signr = SIGKILL;
++ if (signal_group_exit(signal))
++ goto fatal;
++
+ for (;;) {
+ struct k_sigaction *ka;
+
+@@ -2211,7 +2258,15 @@ relock:
+ goto relock;
+ }
+
+- signr = dequeue_signal(current, &current->blocked, &ksig->info);
++ /*
++ * Signals generated by the execution of an instruction
++ * need to be delivered before any other pending signals
++ * so that the instruction pointer in the signal stack
++ * frame points to the faulting instruction.
++ */
++ signr = dequeue_synchronous_signal(&ksig->info);
++ if (!signr)
++ signr = dequeue_signal(current, &current->blocked, &ksig->info);
+
+ if (!signr)
+ break; /* will return 0 */
+@@ -2293,6 +2348,7 @@ relock:
+ continue;
+ }
+
++ fatal:
+ spin_unlock_irq(&sighand->siglock);
+
+ /*
+diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
+index 08ce36147c4c..8f7883b7d717 100644
+--- a/net/batman-adv/hard-interface.c
++++ b/net/batman-adv/hard-interface.c
+@@ -19,7 +19,6 @@
+ #include "main.h"
+
+ #include <linux/atomic.h>
+-#include <linux/bug.h>
+ #include <linux/byteorder/generic.h>
+ #include <linux/errno.h>
+ #include <linux/fs.h>
+@@ -172,8 +171,10 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
+ parent_dev = __dev_get_by_index((struct net *)parent_net,
+ dev_get_iflink(net_dev));
+ /* if we got a NULL parent_dev there is something broken.. */
+- if (WARN(!parent_dev, "Cannot find parent device"))
++ if (!parent_dev) {
++ pr_err("Cannot find parent device\n");
+ return false;
++ }
+
+ if (batadv_mutual_parents(net_dev, net, parent_dev, parent_net))
+ return false;
+diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
+index 05bc176decf0..835af771a9fd 100644
+--- a/net/batman-adv/soft-interface.c
++++ b/net/batman-adv/soft-interface.c
+@@ -211,6 +211,8 @@ static int batadv_interface_tx(struct sk_buff *skb,
+
+ netif_trans_update(soft_iface);
+ vid = batadv_get_vid(skb, 0);
++
++ skb_reset_mac_header(skb);
+ ethhdr = eth_hdr(skb);
+
+ switch (ntohs(ethhdr->h_proto)) {
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index 5a8075d9f2e7..93eb606f7628 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -3186,9 +3186,10 @@ void ceph_con_keepalive(struct ceph_connection *con)
+ dout("con_keepalive %p\n", con);
+ mutex_lock(&con->mutex);
+ clear_standby(con);
++ con_flag_set(con, CON_FLAG_KEEPALIVE_PENDING);
+ mutex_unlock(&con->mutex);
+- if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 &&
+- con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
++
++ if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
+ queue_con(con);
+ }
+ EXPORT_SYMBOL(ceph_con_keepalive);
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 6a0fb9dbc1ba..f8de166b788a 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -1852,9 +1852,16 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
+ int head_need, bool may_encrypt)
+ {
+ struct ieee80211_local *local = sdata->local;
++ struct ieee80211_hdr *hdr;
++ bool enc_tailroom;
+ int tail_need = 0;
+
+- if (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt) {
++ hdr = (struct ieee80211_hdr *) skb->data;
++ enc_tailroom = may_encrypt &&
++ (sdata->crypto_tx_tailroom_needed_cnt ||
++ ieee80211_is_mgmt(hdr->frame_control));
++
++ if (enc_tailroom) {
+ tail_need = IEEE80211_ENCRYPT_TAILROOM;
+ tail_need -= skb_tailroom(skb);
+ tail_need = max_t(int, tail_need, 0);
+@@ -1862,8 +1869,7 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
+
+ if (skb_cloned(skb) &&
+ (!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) ||
+- !skb_clone_writable(skb, ETH_HLEN) ||
+- (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt)))
++ !skb_clone_writable(skb, ETH_HLEN) || enc_tailroom))
+ I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
+ else if (head_need || tail_need)
+ I802_DEBUG_INC(local->tx_expand_skb_head);
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index 026770884d46..f6f91c3b2de0 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -1408,10 +1408,15 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
+ if (!ut[i].family)
+ ut[i].family = family;
+
+- if ((ut[i].mode == XFRM_MODE_TRANSPORT) &&
+- (ut[i].family != prev_family))
+- return -EINVAL;
+-
++ switch (ut[i].mode) {
++ case XFRM_MODE_TUNNEL:
++ case XFRM_MODE_BEET:
++ break;
++ default:
++ if (ut[i].family != prev_family)
++ return -EINVAL;
++ break;
++ }
+ if (ut[i].mode >= XFRM_MODE_MAX)
+ return -EINVAL;
+
+diff --git a/samples/mei/mei-amt-version.c b/samples/mei/mei-amt-version.c
+index 57d0d871dcf7..bb9988914a56 100644
+--- a/samples/mei/mei-amt-version.c
++++ b/samples/mei/mei-amt-version.c
+@@ -117,7 +117,7 @@ static bool mei_init(struct mei *me, const uuid_le *guid,
+
+ me->verbose = verbose;
+
+- me->fd = open("/dev/mei", O_RDWR);
++ me->fd = open("/dev/mei0", O_RDWR);
+ if (me->fd == -1) {
+ mei_err(me, "Cannot establish a handle to the Intel MEI driver\n");
+ goto err;
diff --git a/1157_linux-4.9.158.patch b/1157_linux-4.9.158.patch
new file mode 100644
index 00000000..b38b7c6f
--- /dev/null
+++ b/1157_linux-4.9.158.patch
@@ -0,0 +1,34 @@
+diff --git a/Makefile b/Makefile
+index 4eb7a17e18f1..2b8434aaeece 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 157
++SUBLEVEL = 158
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+
+diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c
+index 634bdbb23851..afdf4e3cafc2 100644
+--- a/fs/binfmt_script.c
++++ b/fs/binfmt_script.c
+@@ -43,14 +43,10 @@ static int load_script(struct linux_binprm *bprm)
+ fput(bprm->file);
+ bprm->file = NULL;
+
+- for (cp = bprm->buf+2;; cp++) {
+- if (cp >= bprm->buf + BINPRM_BUF_SIZE)
+- return -ENOEXEC;
+- if (!*cp || (*cp == '\n'))
+- break;
+- }
++ bprm->buf[BINPRM_BUF_SIZE - 1] = '\0';
++ if ((cp = strchr(bprm->buf, '\n')) == NULL)
++ cp = bprm->buf+BINPRM_BUF_SIZE-1;
+ *cp = '\0';
+-
+ while (cp > bprm->buf) {
+ cp--;
+ if ((*cp == ' ') || (*cp == '\t'))