author	Mike Pagano <mpagano@gentoo.org>	2019-01-23 06:28:47 -0500
committer	Mike Pagano <mpagano@gentoo.org>	2019-01-23 06:28:47 -0500
commit	9c74cda091aec4d4602305f0d541cb3f77a8e723 (patch)
tree	2a3746bb344ccd5d4d5e80072257f9028f774a1c
parent	proj/linux-patches: Linux patch 4.9.151 (diff)
proj/linux-patches: Linux patch 4.9.152 (4.9-155)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--	0000_README	4
-rw-r--r--	1151_linux-4.9.152.patch	1581
2 files changed, 1585 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 93d08840..da300b62 100644
--- a/0000_README
+++ b/0000_README
@@ -647,6 +647,10 @@ Patch: 1150_linux-4.9.151.patch
From: http://www.kernel.org
Desc: Linux 4.9.151
+Patch: 1151_linux-4.9.152.patch
+From: http://www.kernel.org
+Desc: Linux 4.9.152
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1151_linux-4.9.152.patch b/1151_linux-4.9.152.patch
new file mode 100644
index 00000000..33a8d7e6
--- /dev/null
+++ b/1151_linux-4.9.152.patch
@@ -0,0 +1,1581 @@
+diff --git a/Makefile b/Makefile
+index f1aeb98f9ace..27a9292fc0ed 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 151
++SUBLEVEL = 152
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+
+diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
+index 68dedca5a47e..a11c8c2915c9 100644
+--- a/arch/arm64/include/asm/kvm_arm.h
++++ b/arch/arm64/include/asm/kvm_arm.h
+@@ -23,6 +23,8 @@
+ #include <asm/types.h>
+
+ /* Hyp Configuration Register (HCR) bits */
++#define HCR_API (UL(1) << 41)
++#define HCR_APK (UL(1) << 40)
+ #define HCR_E2H (UL(1) << 34)
+ #define HCR_ID (UL(1) << 33)
+ #define HCR_CD (UL(1) << 32)
+@@ -82,6 +84,7 @@
+ HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW)
+ #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
+ #define HCR_INT_OVERRIDE (HCR_FMO | HCR_IMO)
++#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
+ #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
+
+ /* TCR_EL2 Registers bits */
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index fa52817d84c5..3289d1458791 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -517,10 +517,9 @@ CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1
+ #endif
+
+ /* Hyp configuration. */
+- mov x0, #HCR_RW // 64-bit EL1
++ mov_q x0, HCR_HOST_NVHE_FLAGS
+ cbz x2, set_hcr
+- orr x0, x0, #HCR_TGE // Enable Host Extensions
+- orr x0, x0, #HCR_E2H
++ mov_q x0, HCR_HOST_VHE_FLAGS
+ set_hcr:
+ msr hcr_el2, x0
+ isb
+diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
+index d7e90d97f5c4..2a21318fed1d 100644
+--- a/arch/arm64/kernel/kaslr.c
++++ b/arch/arm64/kernel/kaslr.c
+@@ -14,6 +14,7 @@
+ #include <linux/sched.h>
+ #include <linux/types.h>
+
++#include <asm/cacheflush.h>
+ #include <asm/fixmap.h>
+ #include <asm/kernel-pgtable.h>
+ #include <asm/memory.h>
+@@ -43,7 +44,7 @@ static __init u64 get_kaslr_seed(void *fdt)
+ return ret;
+ }
+
+-static __init const u8 *get_cmdline(void *fdt)
++static __init const u8 *kaslr_get_cmdline(void *fdt)
+ {
+ static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE;
+
+@@ -109,7 +110,7 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
+ * Check if 'nokaslr' appears on the command line, and
+ * return 0 if that is the case.
+ */
+- cmdline = get_cmdline(fdt);
++ cmdline = kaslr_get_cmdline(fdt);
+ str = strstr(cmdline, "nokaslr");
+ if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
+ return 0;
+@@ -178,5 +179,8 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
+ module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
+ module_alloc_base &= PAGE_MASK;
+
++ __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
++ __flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed));
++
+ return offset;
+ }
+diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
+index 12f9d1ecdf4c..115b0955715f 100644
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -112,7 +112,7 @@ static void __hyp_text __deactivate_traps_vhe(void)
+
+ static void __hyp_text __deactivate_traps_nvhe(void)
+ {
+- write_sysreg(HCR_RW, hcr_el2);
++ write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
+ write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
+ }
+
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index 34fbbf8fdeaa..1d987061d1a1 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -3135,6 +3135,7 @@ config MIPS32_O32
+ config MIPS32_N32
+ bool "Kernel support for n32 binaries"
+ depends on 64BIT
++ select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
+ select COMPAT
+ select MIPS32_COMPAT
+ select SYSVIPC_COMPAT if SYSVIPC
+diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
+index 2a5bb849b10e..288b58b00dc8 100644
+--- a/arch/mips/pci/msi-octeon.c
++++ b/arch/mips/pci/msi-octeon.c
+@@ -369,7 +369,9 @@ int __init octeon_msi_initialize(void)
+ int irq;
+ struct irq_chip *msi;
+
+- if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
++ if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) {
++ return 0;
++ } else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
+ msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0;
+ msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1;
+ msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2;
+diff --git a/crypto/authenc.c b/crypto/authenc.c
+index c3180eb6d1ee..6bfec690ca5b 100644
+--- a/crypto/authenc.c
++++ b/crypto/authenc.c
+@@ -58,14 +58,22 @@ int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
+ return -EINVAL;
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ return -EINVAL;
+- if (RTA_PAYLOAD(rta) < sizeof(*param))
++
++ /*
++ * RTA_OK() didn't align the rtattr's payload when validating that it
++ * fits in the buffer. Yet, the keys should start on the next 4-byte
++ * aligned boundary. To avoid confusion, require that the rtattr
++ * payload be exactly the param struct, which has a 4-byte aligned size.
++ */
++ if (RTA_PAYLOAD(rta) != sizeof(*param))
+ return -EINVAL;
++ BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO);
+
+ param = RTA_DATA(rta);
+ keys->enckeylen = be32_to_cpu(param->enckeylen);
+
+- key += RTA_ALIGN(rta->rta_len);
+- keylen -= RTA_ALIGN(rta->rta_len);
++ key += rta->rta_len;
++ keylen -= rta->rta_len;
+
+ if (keylen < keys->enckeylen)
+ return -EINVAL;
+diff --git a/crypto/authencesn.c b/crypto/authencesn.c
+index 49e7e85a23d5..73b12f128ae5 100644
+--- a/crypto/authencesn.c
++++ b/crypto/authencesn.c
+@@ -279,7 +279,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
+ struct aead_request *req = areq->data;
+
+ err = err ?: crypto_authenc_esn_decrypt_tail(req, 0);
+- aead_request_complete(req, err);
++ authenc_esn_request_complete(req, err);
+ }
+
+ static int crypto_authenc_esn_decrypt(struct aead_request *req)
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 9f840d9fdfcb..344f34746c10 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -81,7 +81,7 @@
+ #include <asm/uaccess.h>
+
+ static DEFINE_IDR(loop_index_idr);
+-static DEFINE_MUTEX(loop_index_mutex);
++static DEFINE_MUTEX(loop_ctl_mutex);
+
+ static int max_part;
+ static int part_shift;
+@@ -1033,7 +1033,7 @@ static int loop_clr_fd(struct loop_device *lo)
+ */
+ if (atomic_read(&lo->lo_refcnt) > 1) {
+ lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
+- mutex_unlock(&lo->lo_ctl_mutex);
++ mutex_unlock(&loop_ctl_mutex);
+ return 0;
+ }
+
+@@ -1082,12 +1082,12 @@ static int loop_clr_fd(struct loop_device *lo)
+ if (!part_shift)
+ lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
+ loop_unprepare_queue(lo);
+- mutex_unlock(&lo->lo_ctl_mutex);
++ mutex_unlock(&loop_ctl_mutex);
+ /*
+- * Need not hold lo_ctl_mutex to fput backing file.
+- * Calling fput holding lo_ctl_mutex triggers a circular
++ * Need not hold loop_ctl_mutex to fput backing file.
++ * Calling fput holding loop_ctl_mutex triggers a circular
+ * lock dependency possibility warning as fput can take
+- * bd_mutex which is usually taken before lo_ctl_mutex.
++ * bd_mutex which is usually taken before loop_ctl_mutex.
+ */
+ fput(filp);
+ return 0;
+@@ -1350,7 +1350,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
+ struct loop_device *lo = bdev->bd_disk->private_data;
+ int err;
+
+- mutex_lock_nested(&lo->lo_ctl_mutex, 1);
++ mutex_lock_nested(&loop_ctl_mutex, 1);
+ switch (cmd) {
+ case LOOP_SET_FD:
+ err = loop_set_fd(lo, mode, bdev, arg);
+@@ -1359,7 +1359,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
+ err = loop_change_fd(lo, bdev, arg);
+ break;
+ case LOOP_CLR_FD:
+- /* loop_clr_fd would have unlocked lo_ctl_mutex on success */
++ /* loop_clr_fd would have unlocked loop_ctl_mutex on success */
+ err = loop_clr_fd(lo);
+ if (!err)
+ goto out_unlocked;
+@@ -1395,7 +1395,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
+ default:
+ err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
+ }
+- mutex_unlock(&lo->lo_ctl_mutex);
++ mutex_unlock(&loop_ctl_mutex);
+
+ out_unlocked:
+ return err;
+@@ -1528,16 +1528,16 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
+
+ switch(cmd) {
+ case LOOP_SET_STATUS:
+- mutex_lock(&lo->lo_ctl_mutex);
++ mutex_lock(&loop_ctl_mutex);
+ err = loop_set_status_compat(
+ lo, (const struct compat_loop_info __user *) arg);
+- mutex_unlock(&lo->lo_ctl_mutex);
++ mutex_unlock(&loop_ctl_mutex);
+ break;
+ case LOOP_GET_STATUS:
+- mutex_lock(&lo->lo_ctl_mutex);
++ mutex_lock(&loop_ctl_mutex);
+ err = loop_get_status_compat(
+ lo, (struct compat_loop_info __user *) arg);
+- mutex_unlock(&lo->lo_ctl_mutex);
++ mutex_unlock(&loop_ctl_mutex);
+ break;
+ case LOOP_SET_CAPACITY:
+ case LOOP_CLR_FD:
+@@ -1559,9 +1559,11 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ static int lo_open(struct block_device *bdev, fmode_t mode)
+ {
+ struct loop_device *lo;
+- int err = 0;
++ int err;
+
+- mutex_lock(&loop_index_mutex);
++ err = mutex_lock_killable(&loop_ctl_mutex);
++ if (err)
++ return err;
+ lo = bdev->bd_disk->private_data;
+ if (!lo) {
+ err = -ENXIO;
+@@ -1570,18 +1572,20 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
+
+ atomic_inc(&lo->lo_refcnt);
+ out:
+- mutex_unlock(&loop_index_mutex);
++ mutex_unlock(&loop_ctl_mutex);
+ return err;
+ }
+
+-static void __lo_release(struct loop_device *lo)
++static void lo_release(struct gendisk *disk, fmode_t mode)
+ {
++ struct loop_device *lo;
+ int err;
+
++ mutex_lock(&loop_ctl_mutex);
++ lo = disk->private_data;
+ if (atomic_dec_return(&lo->lo_refcnt))
+- return;
++ goto out_unlock;
+
+- mutex_lock(&lo->lo_ctl_mutex);
+ if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
+ /*
+ * In autoclear mode, stop the loop thread
+@@ -1598,14 +1602,8 @@ static void __lo_release(struct loop_device *lo)
+ loop_flush(lo);
+ }
+
+- mutex_unlock(&lo->lo_ctl_mutex);
+-}
+-
+-static void lo_release(struct gendisk *disk, fmode_t mode)
+-{
+- mutex_lock(&loop_index_mutex);
+- __lo_release(disk->private_data);
+- mutex_unlock(&loop_index_mutex);
++out_unlock:
++ mutex_unlock(&loop_ctl_mutex);
+ }
+
+ static const struct block_device_operations lo_fops = {
+@@ -1644,10 +1642,10 @@ static int unregister_transfer_cb(int id, void *ptr, void *data)
+ struct loop_device *lo = ptr;
+ struct loop_func_table *xfer = data;
+
+- mutex_lock(&lo->lo_ctl_mutex);
++ mutex_lock(&loop_ctl_mutex);
+ if (lo->lo_encryption == xfer)
+ loop_release_xfer(lo);
+- mutex_unlock(&lo->lo_ctl_mutex);
++ mutex_unlock(&loop_ctl_mutex);
+ return 0;
+ }
+
+@@ -1813,7 +1811,6 @@ static int loop_add(struct loop_device **l, int i)
+ if (!part_shift)
+ disk->flags |= GENHD_FL_NO_PART_SCAN;
+ disk->flags |= GENHD_FL_EXT_DEVT;
+- mutex_init(&lo->lo_ctl_mutex);
+ atomic_set(&lo->lo_refcnt, 0);
+ lo->lo_number = i;
+ spin_lock_init(&lo->lo_lock);
+@@ -1892,7 +1889,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
+ struct kobject *kobj;
+ int err;
+
+- mutex_lock(&loop_index_mutex);
++ mutex_lock(&loop_ctl_mutex);
+ err = loop_lookup(&lo, MINOR(dev) >> part_shift);
+ if (err < 0)
+ err = loop_add(&lo, MINOR(dev) >> part_shift);
+@@ -1900,7 +1897,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
+ kobj = NULL;
+ else
+ kobj = get_disk(lo->lo_disk);
+- mutex_unlock(&loop_index_mutex);
++ mutex_unlock(&loop_ctl_mutex);
+
+ *part = 0;
+ return kobj;
+@@ -1910,9 +1907,13 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
+ unsigned long parm)
+ {
+ struct loop_device *lo;
+- int ret = -ENOSYS;
++ int ret;
++
++ ret = mutex_lock_killable(&loop_ctl_mutex);
++ if (ret)
++ return ret;
+
+- mutex_lock(&loop_index_mutex);
++ ret = -ENOSYS;
+ switch (cmd) {
+ case LOOP_CTL_ADD:
+ ret = loop_lookup(&lo, parm);
+@@ -1926,19 +1927,15 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
+ ret = loop_lookup(&lo, parm);
+ if (ret < 0)
+ break;
+- mutex_lock(&lo->lo_ctl_mutex);
+ if (lo->lo_state != Lo_unbound) {
+ ret = -EBUSY;
+- mutex_unlock(&lo->lo_ctl_mutex);
+ break;
+ }
+ if (atomic_read(&lo->lo_refcnt) > 0) {
+ ret = -EBUSY;
+- mutex_unlock(&lo->lo_ctl_mutex);
+ break;
+ }
+ lo->lo_disk->private_data = NULL;
+- mutex_unlock(&lo->lo_ctl_mutex);
+ idr_remove(&loop_index_idr, lo->lo_number);
+ loop_remove(lo);
+ break;
+@@ -1948,7 +1945,7 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
+ break;
+ ret = loop_add(&lo, -1);
+ }
+- mutex_unlock(&loop_index_mutex);
++ mutex_unlock(&loop_ctl_mutex);
+
+ return ret;
+ }
+@@ -2031,10 +2028,10 @@ static int __init loop_init(void)
+ THIS_MODULE, loop_probe, NULL, NULL);
+
+ /* pre-create number of devices given by config or max_loop */
+- mutex_lock(&loop_index_mutex);
++ mutex_lock(&loop_ctl_mutex);
+ for (i = 0; i < nr; i++)
+ loop_add(&lo, i);
+- mutex_unlock(&loop_index_mutex);
++ mutex_unlock(&loop_ctl_mutex);
+
+ printk(KERN_INFO "loop: module loaded\n");
+ return 0;
+diff --git a/drivers/block/loop.h b/drivers/block/loop.h
+index 60f0fd2c0c65..a923e74495ce 100644
+--- a/drivers/block/loop.h
++++ b/drivers/block/loop.h
+@@ -55,7 +55,6 @@ struct loop_device {
+
+ spinlock_t lo_lock;
+ int lo_state;
+- struct mutex lo_ctl_mutex;
+ struct kthread_worker worker;
+ struct task_struct *worker_task;
+ bool use_dio;
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 42a53956aefe..394f8ec83cf0 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -108,7 +108,7 @@ static const char *nbdcmd_to_ascii(int cmd)
+
+ static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
+ {
+- bdev->bd_inode->i_size = 0;
++ bd_set_size(bdev, 0);
+ set_capacity(nbd->disk, 0);
+ kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
+
+@@ -117,29 +117,21 @@ static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
+
+ static void nbd_size_update(struct nbd_device *nbd, struct block_device *bdev)
+ {
+- if (!nbd_is_connected(nbd))
+- return;
+-
+- bdev->bd_inode->i_size = nbd->bytesize;
++ blk_queue_logical_block_size(nbd->disk->queue, nbd->blksize);
++ blk_queue_physical_block_size(nbd->disk->queue, nbd->blksize);
++ bd_set_size(bdev, nbd->bytesize);
++ set_blocksize(bdev, nbd->blksize);
+ set_capacity(nbd->disk, nbd->bytesize >> 9);
+ kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
+ }
+
+-static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
++static void nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
+ loff_t blocksize, loff_t nr_blocks)
+ {
+- int ret;
+-
+- ret = set_blocksize(bdev, blocksize);
+- if (ret)
+- return ret;
+-
+ nbd->blksize = blocksize;
+ nbd->bytesize = blocksize * nr_blocks;
+-
+- nbd_size_update(nbd, bdev);
+-
+- return 0;
++ if (nbd_is_connected(nbd))
++ nbd_size_update(nbd, bdev);
+ }
+
+ static void nbd_end_request(struct nbd_cmd *cmd)
+@@ -655,16 +647,17 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
+ case NBD_SET_BLKSIZE: {
+ loff_t bsize = div_s64(nbd->bytesize, arg);
+
+- return nbd_size_set(nbd, bdev, arg, bsize);
++ nbd_size_set(nbd, bdev, arg, bsize);
++ return 0;
+ }
+
+ case NBD_SET_SIZE:
+- return nbd_size_set(nbd, bdev, nbd->blksize,
+- div_s64(arg, nbd->blksize));
+-
++ nbd_size_set(nbd, bdev, nbd->blksize,
++ div_s64(arg, nbd->blksize));
++ return 0;
+ case NBD_SET_SIZE_BLOCKS:
+- return nbd_size_set(nbd, bdev, nbd->blksize, arg);
+-
++ nbd_size_set(nbd, bdev, nbd->blksize, arg);
++ return 0;
+ case NBD_SET_TIMEOUT:
+ if (arg) {
+ nbd->tag_set.timeout = arg * HZ;
+diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
+index 631337c2e4a7..9eac4dcb9971 100644
+--- a/drivers/crypto/caam/caamhash.c
++++ b/drivers/crypto/caam/caamhash.c
+@@ -1232,13 +1232,16 @@ static int ahash_final_no_ctx(struct ahash_request *req)
+
+ desc = edesc->hw_desc;
+
+- state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, state->buf_dma)) {
+- dev_err(jrdev, "unable to map src\n");
+- goto unmap;
+- }
++ if (buflen) {
++ state->buf_dma = dma_map_single(jrdev, buf, buflen,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(jrdev, state->buf_dma)) {
++ dev_err(jrdev, "unable to map src\n");
++ goto unmap;
++ }
+
+- append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
++ append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
++ }
+
+ edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+ digestsize);
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index 7c71722be395..463033b4db1d 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -1347,23 +1347,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
+ int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
+- void *err;
+
+ if (cryptlen + authsize > max_len) {
+ dev_err(dev, "length exceeds h/w max limit\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+- if (ivsize)
+- iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
+-
+ if (!dst || dst == src) {
+ src_len = assoclen + cryptlen + authsize;
+ src_nents = sg_nents_for_len(src, src_len);
+ if (src_nents < 0) {
+ dev_err(dev, "Invalid number of src SG.\n");
+- err = ERR_PTR(-EINVAL);
+- goto error_sg;
++ return ERR_PTR(-EINVAL);
+ }
+ src_nents = (src_nents == 1) ? 0 : src_nents;
+ dst_nents = dst ? src_nents : 0;
+@@ -1373,16 +1368,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
+ src_nents = sg_nents_for_len(src, src_len);
+ if (src_nents < 0) {
+ dev_err(dev, "Invalid number of src SG.\n");
+- err = ERR_PTR(-EINVAL);
+- goto error_sg;
++ return ERR_PTR(-EINVAL);
+ }
+ src_nents = (src_nents == 1) ? 0 : src_nents;
+ dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
+ dst_nents = sg_nents_for_len(dst, dst_len);
+ if (dst_nents < 0) {
+ dev_err(dev, "Invalid number of dst SG.\n");
+- err = ERR_PTR(-EINVAL);
+- goto error_sg;
++ return ERR_PTR(-EINVAL);
+ }
+ dst_nents = (dst_nents == 1) ? 0 : dst_nents;
+ }
+@@ -1405,12 +1398,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
+ dma_len = 0;
+ alloc_len += icv_stashing ? authsize : 0;
+ }
++ alloc_len += ivsize;
+
+ edesc = kmalloc(alloc_len, GFP_DMA | flags);
+- if (!edesc) {
+- dev_err(dev, "could not allocate edescriptor\n");
+- err = ERR_PTR(-ENOMEM);
+- goto error_sg;
++ if (!edesc)
++ return ERR_PTR(-ENOMEM);
++ if (ivsize) {
++ iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
++ iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
+ }
+
+ edesc->src_nents = src_nents;
+@@ -1423,10 +1418,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
+ DMA_BIDIRECTIONAL);
+
+ return edesc;
+-error_sg:
+- if (iv_dma)
+- dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+- return err;
+ }
+
+ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
+diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
+index 6a48d6637e5c..2e85e609f125 100644
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -1238,9 +1238,14 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
+ struct drm_framebuffer *fb = fb_helper->fb;
+ int depth;
+
+- if (var->pixclock != 0 || in_dbg_master())
++ if (in_dbg_master())
+ return -EINVAL;
+
++ if (var->pixclock != 0) {
++ DRM_DEBUG("fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n");
++ var->pixclock = 0;
++ }
++
+ /* Need to resize the fb object !!! */
+ if (var->bits_per_pixel > fb->bits_per_pixel ||
+ var->xres > fb->width || var->yres > fb->height ||
+diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
+index 6ca71aabb576..d300e5e7eadc 100644
+--- a/drivers/media/platform/vivid/vivid-kthread-cap.c
++++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
+@@ -877,8 +877,11 @@ int vivid_start_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
+ "%s-vid-cap", dev->v4l2_dev.name);
+
+ if (IS_ERR(dev->kthread_vid_cap)) {
++ int err = PTR_ERR(dev->kthread_vid_cap);
++
++ dev->kthread_vid_cap = NULL;
+ v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
+- return PTR_ERR(dev->kthread_vid_cap);
++ return err;
+ }
+ *pstreaming = true;
+ vivid_grab_controls(dev, true);
+diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c
+index 98eed5889bc1..7c8d75852816 100644
+--- a/drivers/media/platform/vivid/vivid-kthread-out.c
++++ b/drivers/media/platform/vivid/vivid-kthread-out.c
+@@ -248,8 +248,11 @@ int vivid_start_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
+ "%s-vid-out", dev->v4l2_dev.name);
+
+ if (IS_ERR(dev->kthread_vid_out)) {
++ int err = PTR_ERR(dev->kthread_vid_out);
++
++ dev->kthread_vid_out = NULL;
+ v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
+- return PTR_ERR(dev->kthread_vid_out);
++ return err;
+ }
+ *pstreaming = true;
+ vivid_grab_controls(dev, true);
+diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
+index fcda3ae4e6b0..f9a810e3f521 100644
+--- a/drivers/media/platform/vivid/vivid-vid-common.c
++++ b/drivers/media/platform/vivid/vivid-vid-common.c
+@@ -33,7 +33,7 @@ const struct v4l2_dv_timings_cap vivid_dv_timings_cap = {
+ .type = V4L2_DV_BT_656_1120,
+ /* keep this initialization for compatibility with GCC < 4.4.6 */
+ .reserved = { 0 },
+- V4L2_INIT_BT_TIMINGS(0, MAX_WIDTH, 0, MAX_HEIGHT, 14000000, 775000000,
++ V4L2_INIT_BT_TIMINGS(16, MAX_WIDTH, 16, MAX_HEIGHT, 14000000, 775000000,
+ V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+ V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF,
+ V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_INTERLACED)
+diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
+index 1ed7ba3dfdbe..ae959e01042a 100644
+--- a/drivers/media/usb/em28xx/em28xx-video.c
++++ b/drivers/media/usb/em28xx/em28xx-video.c
+@@ -1062,6 +1062,8 @@ int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count)
+
+ em28xx_videodbg("%s\n", __func__);
+
++ dev->v4l2->field_count = 0;
++
+ /* Make sure streaming is not already in progress for this type
+ of filehandle (e.g. video, vbi) */
+ rc = res_get(dev, vq->type);
+@@ -1290,8 +1292,6 @@ static void em28xx_ctrl_notify(struct v4l2_ctrl *ctrl, void *priv)
+ {
+ struct em28xx *dev = priv;
+
+- dev->v4l2->field_count = 0;
+-
+ /*
+ * In the case of non-AC97 volume controls, we still need
+ * to do some setups at em28xx, in order to mute/unmute
+diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
+index 4df4a1f402be..b1a4d4e2341b 100644
+--- a/drivers/media/v4l2-core/videobuf2-core.c
++++ b/drivers/media/v4l2-core/videobuf2-core.c
+@@ -1916,9 +1916,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
+ return -EINVAL;
+ }
+ }
++
++ mutex_lock(&q->mmap_lock);
++
+ if (vb2_fileio_is_active(q)) {
+ dprintk(1, "mmap: file io in progress\n");
+- return -EBUSY;
++ ret = -EBUSY;
++ goto unlock;
+ }
+
+ /*
+@@ -1926,7 +1930,7 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
+ */
+ ret = __find_plane_by_offset(q, off, &buffer, &plane);
+ if (ret)
+- return ret;
++ goto unlock;
+
+ vb = q->bufs[buffer];
+
+@@ -1939,11 +1943,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
+ if (length < (vma->vm_end - vma->vm_start)) {
+ dprintk(1,
+ "MMAP invalid, as it would overflow buffer length\n");
+- return -EINVAL;
++ ret = -EINVAL;
++ goto unlock;
+ }
+
+- mutex_lock(&q->mmap_lock);
+ ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
++
++unlock:
+ mutex_unlock(&q->mmap_lock);
+ if (ret)
+ return ret;
+diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
+index 5628a6b5b19b..c5c320efc7b4 100644
+--- a/drivers/mfd/tps6586x.c
++++ b/drivers/mfd/tps6586x.c
+@@ -594,6 +594,29 @@ static int tps6586x_i2c_remove(struct i2c_client *client)
+ return 0;
+ }
+
++static int __maybe_unused tps6586x_i2c_suspend(struct device *dev)
++{
++ struct tps6586x *tps6586x = dev_get_drvdata(dev);
++
++ if (tps6586x->client->irq)
++ disable_irq(tps6586x->client->irq);
++
++ return 0;
++}
++
++static int __maybe_unused tps6586x_i2c_resume(struct device *dev)
++{
++ struct tps6586x *tps6586x = dev_get_drvdata(dev);
++
++ if (tps6586x->client->irq)
++ enable_irq(tps6586x->client->irq);
++
++ return 0;
++}
++
++static SIMPLE_DEV_PM_OPS(tps6586x_pm_ops, tps6586x_i2c_suspend,
++ tps6586x_i2c_resume);
++
+ static const struct i2c_device_id tps6586x_id_table[] = {
+ { "tps6586x", 0 },
+ { },
+@@ -604,6 +627,7 @@ static struct i2c_driver tps6586x_driver = {
+ .driver = {
+ .name = "tps6586x",
+ .of_match_table = of_match_ptr(tps6586x_of_match),
++ .pm = &tps6586x_pm_ops,
+ },
+ .probe = tps6586x_i2c_probe,
+ .remove = tps6586x_i2c_remove,
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 389d1db69a32..24a3433f3944 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1900,6 +1900,9 @@ static int __bond_release_one(struct net_device *bond_dev,
+ if (!bond_has_slaves(bond)) {
+ bond_set_carrier(bond);
+ eth_hw_addr_random(bond_dev);
++ bond->nest_level = SINGLE_DEPTH_NESTING;
++ } else {
++ bond->nest_level = dev_get_nest_level(bond_dev) + 1;
+ }
+
+ unblock_netpoll_tx();
+diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
+index b44c1bb687a2..ebc193f7f7dd 100644
+--- a/drivers/scsi/scsi_pm.c
++++ b/drivers/scsi/scsi_pm.c
+@@ -79,8 +79,22 @@ static int scsi_dev_type_resume(struct device *dev,
+
+ if (err == 0) {
+ pm_runtime_disable(dev);
+- pm_runtime_set_active(dev);
++ err = pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
++
++ /*
++ * Forcibly set runtime PM status of request queue to "active"
++ * to make sure we can again get requests from the queue
++ * (see also blk_pm_peek_request()).
++ *
++ * The resume hook will correct runtime PM status of the disk.
++ */
++ if (!err && scsi_is_sdev_device(dev)) {
++ struct scsi_device *sdev = to_scsi_device(dev);
++
++ if (sdev->request_queue->dev)
++ blk_set_runtime_active(sdev->request_queue);
++ }
+ }
+
+ return err;
+@@ -139,16 +153,6 @@ static int scsi_bus_resume_common(struct device *dev,
+ else
+ fn = NULL;
+
+- /*
+- * Forcibly set runtime PM status of request queue to "active" to
+- * make sure we can again get requests from the queue (see also
+- * blk_pm_peek_request()).
+- *
+- * The resume hook will correct runtime PM status of the disk.
+- */
+- if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev))
+- blk_set_runtime_active(to_scsi_device(dev)->request_queue);
+-
+ if (fn) {
+ async_schedule_domain(fn, dev, &scsi_sd_pm_domain);
+
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index ab999c4444b8..867ae76f93f2 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -208,6 +208,12 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
+ sp = buffer_data[0] & 0x80 ? 1 : 0;
+ buffer_data[0] &= ~0x80;
+
++ /*
++ * Ensure WP, DPOFUA, and RESERVED fields are cleared in
++ * received mode parameter buffer before doing MODE SELECT.
++ */
++ data.device_specific = 0;
++
+ if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
+ SD_MAX_RETRIES, &data, &sshdr)) {
+ if (scsi_sense_valid(&sshdr))
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index e6c4321d695c..f61f8650665f 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -1475,7 +1475,8 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *
+ static int tty_reopen(struct tty_struct *tty)
+ {
+ struct tty_driver *driver = tty->driver;
+- int retval;
++ struct tty_ldisc *ld;
++ int retval = 0;
+
+ if (driver->type == TTY_DRIVER_TYPE_PTY &&
+ driver->subtype == PTY_TYPE_MASTER)
+@@ -1487,14 +1488,21 @@ static int tty_reopen(struct tty_struct *tty)
+ if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
+ return -EBUSY;
+
+- tty->count++;
++ ld = tty_ldisc_ref_wait(tty);
++ if (ld) {
++ tty_ldisc_deref(ld);
++ } else {
++ retval = tty_ldisc_lock(tty, 5 * HZ);
++ if (retval)
++ return retval;
+
+- if (tty->ldisc)
+- return 0;
++ if (!tty->ldisc)
++ retval = tty_ldisc_reinit(tty, tty->termios.c_line);
++ tty_ldisc_unlock(tty);
++ }
+
+- retval = tty_ldisc_reinit(tty, tty->termios.c_line);
+- if (retval)
+- tty->count--;
++ if (retval == 0)
++ tty->count++;
+
+ return retval;
+ }
+diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
+index 1bf8ed13f827..dbd7ba32caac 100644
+--- a/drivers/tty/tty_ldsem.c
++++ b/drivers/tty/tty_ldsem.c
+@@ -307,6 +307,16 @@ down_write_failed(struct ld_semaphore *sem, long count, long timeout)
+ if (!locked)
+ ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
+ list_del(&waiter.list);
++
++ /*
++ * In case of timeout, wake up every reader who gave the right of way
++ * to writer. Prevent separation readers into two groups:
++ * one that helds semaphore and another that sleeps.
++ * (in case of no contention with a writer)
++ */
++ if (!locked && list_empty(&sem->write_wait))
++ __ldsem_wake_readers(sem);
++
+ raw_spin_unlock_irq(&sem->wait_lock);
+
+ __set_task_state(tsk, TASK_RUNNING);
+diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+index a3edb20ea4c3..a846d32ee653 100644
+--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
++++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+@@ -609,6 +609,8 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
+
+ int r = 0;
+
++ memset(&p, 0, sizeof(p));
++
+ switch (cmd) {
+ case OMAPFB_SYNC_GFX:
+ DBG("ioctl SYNC_GFX\n");
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index cb936c90ae82..8a894cd4875b 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -114,6 +114,20 @@ void invalidate_bdev(struct block_device *bdev)
+ }
+ EXPORT_SYMBOL(invalidate_bdev);
+
++static void set_init_blocksize(struct block_device *bdev)
++{
++ unsigned bsize = bdev_logical_block_size(bdev);
++ loff_t size = i_size_read(bdev->bd_inode);
++
++ while (bsize < PAGE_SIZE) {
++ if (size & bsize)
++ break;
++ bsize <<= 1;
++ }
++ bdev->bd_block_size = bsize;
++ bdev->bd_inode->i_blkbits = blksize_bits(bsize);
++}
++
+ int set_blocksize(struct block_device *bdev, int size)
+ {
+ /* Size must be a power of two, and between 512 and PAGE_SIZE */
+@@ -1209,18 +1223,9 @@ EXPORT_SYMBOL(check_disk_change);
+
+ void bd_set_size(struct block_device *bdev, loff_t size)
+ {
+- unsigned bsize = bdev_logical_block_size(bdev);
+-
+ inode_lock(bdev->bd_inode);
+ i_size_write(bdev->bd_inode, size);
+ inode_unlock(bdev->bd_inode);
+- while (bsize < PAGE_SIZE) {
+- if (size & bsize)
+- break;
+- bsize <<= 1;
+- }
+- bdev->bd_block_size = bsize;
+- bdev->bd_inode->i_blkbits = blksize_bits(bsize);
+ }
+ EXPORT_SYMBOL(bd_set_size);
+
+@@ -1297,8 +1302,10 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
+ }
+ }
+
+- if (!ret)
++ if (!ret) {
+ bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
++ set_init_blocksize(bdev);
++ }
+
+ /*
+ * If the device is invalidated, rescan partition
+@@ -1333,6 +1340,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
+ goto out_clear;
+ }
+ bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
++ set_init_blocksize(bdev);
+ }
+ } else {
+ if (bdev->bd_contains == bdev) {
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 77b32415d9f2..9d3352fe8dc9 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -4193,6 +4193,14 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
+ spin_lock(&fs_info->ordered_root_lock);
+ }
+ spin_unlock(&fs_info->ordered_root_lock);
++
++ /*
++ * We need this here because if we've been flipped read-only we won't
++ * get sync() from the umount, so we need to make sure any ordered
++ * extents that haven't had their dirty pages IO start writeout yet
++ * actually get run and error out properly.
++ */
++ btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1);
+ }
+
+ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
+diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
+index ab4cbb4be423..e59eeaf02eaa 100644
+--- a/fs/f2fs/recovery.c
++++ b/fs/f2fs/recovery.c
+@@ -196,32 +196,6 @@ static void recover_inode(struct inode *inode, struct page *page)
+ ino_of_node(page), name);
+ }
+
+-static bool is_same_inode(struct inode *inode, struct page *ipage)
+-{
+- struct f2fs_inode *ri = F2FS_INODE(ipage);
+- struct timespec disk;
+-
+- if (!IS_INODE(ipage))
+- return true;
+-
+- disk.tv_sec = le64_to_cpu(ri->i_ctime);
+- disk.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
+- if (timespec_compare(&inode->i_ctime, &disk) > 0)
+- return false;
+-
+- disk.tv_sec = le64_to_cpu(ri->i_atime);
+- disk.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
+- if (timespec_compare(&inode->i_atime, &disk) > 0)
+- return false;
+-
+- disk.tv_sec = le64_to_cpu(ri->i_mtime);
+- disk.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
+- if (timespec_compare(&inode->i_mtime, &disk) > 0)
+- return false;
+-
+- return true;
+-}
+-
+ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
+ {
+ struct curseg_info *curseg;
+@@ -248,10 +222,7 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
+ goto next;
+
+ entry = get_fsync_inode(head, ino_of_node(page));
+- if (entry) {
+- if (!is_same_inode(entry->inode, page))
+- goto next;
+- } else {
++ if (!entry) {
+ if (IS_INODE(page) && is_dent_dnode(page)) {
+ err = recover_inode_page(sbi, page);
+ if (err)
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index 94f83e74db24..712b44c63701 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -346,8 +346,9 @@ static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
+ {
+ #ifdef CONFIG_SECCOMP
+ seq_put_decimal_ull(m, "Seccomp:\t", p->seccomp.mode);
++ seq_putc(m, '\n');
+ #endif
+- seq_printf(m, "\nSpeculation_Store_Bypass:\t");
++ seq_printf(m, "Speculation_Store_Bypass:\t");
+ switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
+ case -EINVAL:
+ seq_printf(m, "unknown");
+diff --git a/mm/memory.c b/mm/memory.c
+index f3fef1df7402..35d8217bb046 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2823,6 +2823,28 @@ static int __do_fault(struct fault_env *fe, pgoff_t pgoff,
+ struct vm_fault vmf;
+ int ret;
+
++ /*
++ * Preallocate pte before we take page_lock because this might lead to
++ * deadlocks for memcg reclaim which waits for pages under writeback:
++ * lock_page(A)
++ * SetPageWriteback(A)
++ * unlock_page(A)
++ * lock_page(B)
++ * lock_page(B)
++ * pte_alloc_pne
++ * shrink_page_list
++ * wait_on_page_writeback(A)
++ * SetPageWriteback(B)
++ * unlock_page(B)
++ * # flush A, B to clear the writeback
++ */
++ if (pmd_none(*fe->pmd) && !fe->prealloc_pte) {
++ fe->prealloc_pte = pte_alloc_one(vma->vm_mm, fe->address);
++ if (!fe->prealloc_pte)
++ return VM_FAULT_OOM;
++ smp_wmb(); /* See comment in __pte_alloc() */
++ }
++
+ vmf.virtual_address = (void __user *)(fe->address & PAGE_MASK);
+ vmf.pgoff = pgoff;
+ vmf.flags = fe->flags;
+diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
+index 82ce5713f744..7e42c0d1f55b 100644
+--- a/net/bridge/br_netfilter_hooks.c
++++ b/net/bridge/br_netfilter_hooks.c
+@@ -275,7 +275,7 @@ int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_
+ struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+ int ret;
+
+- if (neigh->hh.hh_len) {
++ if ((neigh->nud_state & NUD_CONNECTED) && neigh->hh.hh_len) {
+ neigh_hh_bridge(&neigh->hh, skb);
+ skb->dev = nf_bridge->physindev;
+ ret = br_handle_frame_finish(net, sk, skb);
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index 18c1f07e4f3b..c7e5aaf2eeb8 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -1147,14 +1147,16 @@ static int do_replace(struct net *net, const void __user *user,
+ tmp.name[sizeof(tmp.name) - 1] = 0;
+
+ countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
+- newinfo = vmalloc(sizeof(*newinfo) + countersize);
++ newinfo = __vmalloc(sizeof(*newinfo) + countersize, GFP_KERNEL_ACCOUNT,
++ PAGE_KERNEL);
+ if (!newinfo)
+ return -ENOMEM;
+
+ if (countersize)
+ memset(newinfo->counters, 0, countersize);
+
+- newinfo->entries = vmalloc(tmp.entries_size);
++ newinfo->entries = __vmalloc(tmp.entries_size, GFP_KERNEL_ACCOUNT,
++ PAGE_KERNEL);
+ if (!newinfo->entries) {
+ ret = -ENOMEM;
+ goto free_newinfo;
+diff --git a/net/can/gw.c b/net/can/gw.c
+index 77c8af4047ef..81650affa3fa 100644
+--- a/net/can/gw.c
++++ b/net/can/gw.c
+@@ -418,13 +418,29 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
+ while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
+ (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);
+
+- /* check for checksum updates when the CAN frame has been modified */
++ /* Has the CAN frame been modified? */
+ if (modidx) {
+- if (gwj->mod.csumfunc.crc8)
++ /* get available space for the processed CAN frame type */
++ int max_len = nskb->len - offsetof(struct can_frame, data);
++
++ /* dlc may have changed, make sure it fits to the CAN frame */
++ if (cf->can_dlc > max_len)
++ goto out_delete;
++
++ /* check for checksum updates in classic CAN length only */
++ if (gwj->mod.csumfunc.crc8) {
++ if (cf->can_dlc > 8)
++ goto out_delete;
++
+ (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);
++ }
++
++ if (gwj->mod.csumfunc.xor) {
++ if (cf->can_dlc > 8)
++ goto out_delete;
+
+- if (gwj->mod.csumfunc.xor)
+ (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
++ }
+ }
+
+ /* clear the skb timestamp if not configured the other way */
+@@ -436,6 +452,14 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
+ gwj->dropped_frames++;
+ else
+ gwj->handled_frames++;
++
++ return;
++
++ out_delete:
++ /* delete frame due to misconfiguration */
++ gwj->deleted_frames++;
++ kfree_skb(nskb);
++ return;
+ }
+
+ static inline int cgw_register_filter(struct cgw_job *gwj)
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index a5851c0bc278..e39895ea1b77 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -133,19 +133,17 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
+
+ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
+ {
++ __be16 _ports[2], *ports;
+ struct sockaddr_in sin;
+- __be16 *ports;
+- int end;
+-
+- end = skb_transport_offset(skb) + 4;
+- if (end > 0 && !pskb_may_pull(skb, end))
+- return;
+
+ /* All current transport protocols have the port numbers in the
+ * first four bytes of the transport header and this function is
+ * written with this assumption in mind.
+ */
+- ports = (__be16 *)skb_transport_header(skb);
++ ports = skb_header_pointer(skb, skb_transport_offset(skb),
++ sizeof(_ports), &_ports);
++ if (!ports)
++ return;
+
+ sin.sin_family = AF_INET;
+ sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index 2d3c8fe27583..956af11e9ba3 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -335,6 +335,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info)
+ skb_reset_network_header(skb);
+ iph = ipv6_hdr(skb);
+ iph->daddr = fl6->daddr;
++ ip6_flow_hdr(iph, 0, 0);
+
+ serr = SKB_EXT_ERR(skb);
+ serr->ee.ee_errno = err;
+@@ -694,17 +695,15 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
+ }
+ if (np->rxopt.bits.rxorigdstaddr) {
+ struct sockaddr_in6 sin6;
+- __be16 *ports;
+- int end;
++ __be16 _ports[2], *ports;
+
+- end = skb_transport_offset(skb) + 4;
+- if (end <= 0 || pskb_may_pull(skb, end)) {
++ ports = skb_header_pointer(skb, skb_transport_offset(skb),
++ sizeof(_ports), &_ports);
++ if (ports) {
+ /* All current transport protocols have the port numbers in the
+ * first four bytes of the transport header and this function is
+ * written with this assumption in mind.
+ */
+- ports = (__be16 *)skb_transport_header(skb);
+-
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_addr = ipv6_hdr(skb)->daddr;
+ sin6.sin6_port = ports[1];
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index a9d0358d4f3b..82e222cd4845 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2663,7 +2663,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+ addr = saddr->sll_halen ? saddr->sll_addr : NULL;
+ dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
+ if (addr && dev && saddr->sll_halen < dev->addr_len)
+- goto out;
++ goto out_put;
+ }
+
+ err = -ENXIO;
+@@ -2862,7 +2862,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ addr = saddr->sll_halen ? saddr->sll_addr : NULL;
+ dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
+ if (addr && dev && saddr->sll_halen < dev->addr_len)
+- goto out;
++ goto out_unlock;
+ }
+
+ err = -ENXIO;
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index e7866d47934d..31f461f955ec 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -97,11 +97,9 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
+
+ switch (ev) {
+ case NETDEV_UP:
+- addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
++ addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
+ if (addr) {
+ addr->a.v6.sin6_family = AF_INET6;
+- addr->a.v6.sin6_port = 0;
+- addr->a.v6.sin6_flowinfo = 0;
+ addr->a.v6.sin6_addr = ifa->addr;
+ addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
+ addr->valid = 1;
+@@ -413,7 +411,6 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
+ addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
+ if (addr) {
+ addr->a.v6.sin6_family = AF_INET6;
+- addr->a.v6.sin6_port = 0;
+ addr->a.v6.sin6_addr = ifp->addr;
+ addr->a.v6.sin6_scope_id = dev->ifindex;
+ addr->valid = 1;
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index fb7b7632316a..8ea8217db960 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -151,7 +151,6 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
+ addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
+ if (addr) {
+ addr->a.v4.sin_family = AF_INET;
+- addr->a.v4.sin_port = 0;
+ addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
+ addr->valid = 1;
+ INIT_LIST_HEAD(&addr->list);
+@@ -777,10 +776,9 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
+
+ switch (ev) {
+ case NETDEV_UP:
+- addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
++ addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
+ if (addr) {
+ addr->a.v4.sin_family = AF_INET;
+- addr->a.v4.sin_port = 0;
+ addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
+ addr->valid = 1;
+ spin_lock_bh(&net->sctp.local_addr_lock);
+diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
+index 5b30603596d0..eafc78e063f1 100644
+--- a/net/sunrpc/rpcb_clnt.c
++++ b/net/sunrpc/rpcb_clnt.c
+@@ -770,6 +770,12 @@ void rpcb_getport_async(struct rpc_task *task)
+ case RPCBVERS_3:
+ map->r_netid = xprt->address_strings[RPC_DISPLAY_NETID];
+ map->r_addr = rpc_sockaddr2uaddr(sap, GFP_ATOMIC);
++ if (!map->r_addr) {
++ status = -ENOMEM;
++ dprintk("RPC: %5u %s: no memory available\n",
++ task->tk_pid, __func__);
++ goto bailout_free_args;
++ }
+ map->r_owner = "";
+ break;
+ case RPCBVERS_2:
+@@ -792,6 +798,8 @@ void rpcb_getport_async(struct rpc_task *task)
+ rpc_put_task(child);
+ return;
+
++bailout_free_args:
++ kfree(map);
+ bailout_release_client:
+ rpc_release_client(rpcb_clnt);
+ bailout_nofree:
+diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
+index aedc476fac02..d947b8210399 100644
+--- a/net/tipc/netlink_compat.c
++++ b/net/tipc/netlink_compat.c
+@@ -87,6 +87,11 @@ static int tipc_skb_tailroom(struct sk_buff *skb)
+ return limit;
+ }
+
++static inline int TLV_GET_DATA_LEN(struct tlv_desc *tlv)
++{
++ return TLV_GET_LEN(tlv) - TLV_SPACE(0);
++}
++
+ static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
+ {
+ struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb);
+@@ -166,6 +171,11 @@ static struct sk_buff *tipc_get_err_tlv(char *str)
+ return buf;
+ }
+
++static inline bool string_is_valid(char *s, int len)
++{
++ return memchr(s, '\0', len) ? true : false;
++}
++
+ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
+ struct tipc_nl_compat_msg *msg,
+ struct sk_buff *arg)
+@@ -370,6 +380,7 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
+ struct nlattr *prop;
+ struct nlattr *bearer;
+ struct tipc_bearer_config *b;
++ int len;
+
+ b = (struct tipc_bearer_config *)TLV_DATA(msg->req);
+
+@@ -377,6 +388,10 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
+ if (!bearer)
+ return -EMSGSIZE;
+
++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
++ if (!string_is_valid(b->name, len))
++ return -EINVAL;
++
+ if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name))
+ return -EMSGSIZE;
+
+@@ -402,6 +417,7 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
+ {
+ char *name;
+ struct nlattr *bearer;
++ int len;
+
+ name = (char *)TLV_DATA(msg->req);
+
+@@ -409,6 +425,10 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
+ if (!bearer)
+ return -EMSGSIZE;
+
++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
++ if (!string_is_valid(name, len))
++ return -EINVAL;
++
+ if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name))
+ return -EMSGSIZE;
+
+@@ -469,6 +489,7 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
+ struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
+ struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
+ int err;
++ int len;
+
+ if (!attrs[TIPC_NLA_LINK])
+ return -EINVAL;
+@@ -495,6 +516,11 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
+ return err;
+
+ name = (char *)TLV_DATA(msg->req);
++
++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
++ if (!string_is_valid(name, len))
++ return -EINVAL;
++
+ if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
+ return 0;
+
+@@ -635,6 +661,7 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
+ struct nlattr *prop;
+ struct nlattr *media;
+ struct tipc_link_config *lc;
++ int len;
+
+ lc = (struct tipc_link_config *)TLV_DATA(msg->req);
+
+@@ -642,6 +669,10 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
+ if (!media)
+ return -EMSGSIZE;
+
++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
++ if (!string_is_valid(lc->name, len))
++ return -EINVAL;
++
+ if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name))
+ return -EMSGSIZE;
+
+@@ -662,6 +693,7 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
+ struct nlattr *prop;
+ struct nlattr *bearer;
+ struct tipc_link_config *lc;
++ int len;
+
+ lc = (struct tipc_link_config *)TLV_DATA(msg->req);
+
+@@ -669,6 +701,10 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
+ if (!bearer)
+ return -EMSGSIZE;
+
++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
++ if (!string_is_valid(lc->name, len))
++ return -EINVAL;
++
+ if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name))
+ return -EMSGSIZE;
+
+@@ -717,9 +753,14 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
+ struct tipc_link_config *lc;
+ struct tipc_bearer *bearer;
+ struct tipc_media *media;
++ int len;
+
+ lc = (struct tipc_link_config *)TLV_DATA(msg->req);
+
++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
++ if (!string_is_valid(lc->name, len))
++ return -EINVAL;
++
+ media = tipc_media_find(lc->name);
+ if (media) {
+ cmd->doit = &tipc_nl_media_set;
+@@ -741,6 +782,7 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
+ {
+ char *name;
+ struct nlattr *link;
++ int len;
+
+ name = (char *)TLV_DATA(msg->req);
+
+@@ -748,6 +790,10 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
+ if (!link)
+ return -EMSGSIZE;
+
++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
++ if (!string_is_valid(name, len))
++ return -EINVAL;
++
+ if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name))
+ return -EMSGSIZE;
+
+@@ -769,6 +815,8 @@ static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg)
+ };
+
+ ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
++ if (TLV_GET_DATA_LEN(msg->req) < sizeof(struct tipc_name_table_query))
++ return -EINVAL;
+
+ depth = ntohl(ntq->depth);
+
+@@ -1192,7 +1240,7 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
+ }
+
+ len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
+- if (len && !TLV_OK(msg.req, len)) {
++ if (!len || !TLV_OK(msg.req, len)) {
+ msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
+ err = -EOPNOTSUPP;
+ goto send;
+diff --git a/security/security.c b/security/security.c
+index f825304f04a7..112df16be770 100644
+--- a/security/security.c
++++ b/security/security.c
+@@ -904,6 +904,13 @@ int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
+
+ void security_cred_free(struct cred *cred)
+ {
++ /*
++ * There is a failure case in prepare_creds() that
++ * may result in a call here with ->security being NULL.
++ */
++ if (unlikely(cred->security == NULL))
++ return;
++
+ call_void_hook(cred_free, cred);
+ }
+
+diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
+index 175e4dce58df..c483de590ba3 100644
+--- a/security/selinux/ss/policydb.c
++++ b/security/selinux/ss/policydb.c
+@@ -726,7 +726,8 @@ static int sens_destroy(void *key, void *datum, void *p)
+ kfree(key);
+ if (datum) {
+ levdatum = datum;
+- ebitmap_destroy(&levdatum->level->cat);
++ if (levdatum->level)
++ ebitmap_destroy(&levdatum->level->cat);
+ kfree(levdatum->level);
+ }
+ kfree(datum);
+diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
+index 0309f2111c70..5367f854fadc 100644
+--- a/security/yama/yama_lsm.c
++++ b/security/yama/yama_lsm.c
+@@ -359,7 +359,9 @@ static int yama_ptrace_access_check(struct task_struct *child,
+ break;
+ case YAMA_SCOPE_RELATIONAL:
+ rcu_read_lock();
+- if (!task_is_descendant(current, child) &&
++ if (!pid_alive(child))
++ rc = -EPERM;
++ if (!rc && !task_is_descendant(current, child) &&
+ !ptracer_exception_found(current, child) &&
+ !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
+ rc = -EPERM;