author    Mike Pagano <mpagano@gentoo.org>  2018-08-17 15:26:38 -0400
committer Mike Pagano <mpagano@gentoo.org>  2018-08-17 15:26:38 -0400
commit    22f52f43b9e8d3d1ff09551d42d95d4b81adf987 (patch)
tree      39ca1fd8e81595764c4c5cc4d43669d1e793e7d3
parent    x86/l1tf: Fix build error seen if CONFIG_KVM_INTEL is disabled (diff)
download  linux-patches-22f52f43.tar.gz
          linux-patches-22f52f43.tar.bz2
          linux-patches-22f52f43.zip
Linux patch 4.14.64
-rw-r--r--  0000_README               |    4
-rw-r--r--  1063_linux-4.14.64.patch  | 1527
2 files changed, 1531 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index ada5b829..41f1d759 100644
--- a/0000_README
+++ b/0000_README
@@ -295,6 +295,10 @@ Patch: 1062_linux-4.14.63.patch
From: http://www.kernel.org
Desc: Linux 4.14.63
+Patch: 1063_linux-4.14.64.patch
+From: http://www.kernel.org
+Desc: Linux 4.14.64
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1063_linux-4.14.64.patch b/1063_linux-4.14.64.patch
new file mode 100644
index 00000000..b4a1f5f5
--- /dev/null
+++ b/1063_linux-4.14.64.patch
@@ -0,0 +1,1527 @@
+diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
+index 560beaef5a7c..73fcdcd52b87 100644
+--- a/Documentation/process/changes.rst
++++ b/Documentation/process/changes.rst
+@@ -33,7 +33,7 @@ GNU C 3.2 gcc --version
+ GNU make 3.81 make --version
+ binutils 2.20 ld -v
+ util-linux 2.10o fdformat --version
+-module-init-tools 0.9.10 depmod -V
++kmod 13 depmod -V
+ e2fsprogs 1.41.4 e2fsck -V
+ jfsutils 1.1.3 fsck.jfs -V
+ reiserfsprogs 3.6.3 reiserfsck -V
+@@ -141,12 +141,6 @@ is not build with ``CONFIG_KALLSYMS`` and you have no way to rebuild and
+ reproduce the Oops with that option, then you can still decode that Oops
+ with ksymoops.
+
+-Module-Init-Tools
+------------------
+-
+-A new module loader is now in the kernel that requires ``module-init-tools``
+-to use. It is backward compatible with the 2.4.x series kernels.
+-
+ Mkinitrd
+ --------
+
+@@ -346,16 +340,17 @@ Util-linux
+
+ - <https://www.kernel.org/pub/linux/utils/util-linux/>
+
++Kmod
++----
++
++- <https://www.kernel.org/pub/linux/utils/kernel/kmod/>
++- <https://git.kernel.org/pub/scm/utils/kernel/kmod/kmod.git>
++
+ Ksymoops
+ --------
+
+ - <https://www.kernel.org/pub/linux/utils/kernel/ksymoops/v2.4/>
+
+-Module-Init-Tools
+------------------
+-
+-- <https://www.kernel.org/pub/linux/utils/kernel/module-init-tools/>
+-
+ Mkinitrd
+ --------
+
+diff --git a/Makefile b/Makefile
+index f3bb9428b3dc..025156791e90 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 63
++SUBLEVEL = 64
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
+index f6b877d2726d..6ac0d32d60a5 100644
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -938,12 +938,12 @@ int pmd_clear_huge(pmd_t *pmd)
+ return 1;
+ }
+
+-int pud_free_pmd_page(pud_t *pud)
++int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+ {
+ return pud_none(*pud);
+ }
+
+-int pmd_free_pte_page(pmd_t *pmd)
++int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+ {
+ return pmd_none(*pmd);
+ }
+diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
+index 16c4ccb1f154..d2364c55bbde 100644
+--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
++++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
+@@ -265,7 +265,7 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2)
+ vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
+ vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
+ vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
+- vmovd _args_digest(state , idx, 4) , %xmm0
++ vmovd _args_digest+4*32(state, idx, 4), %xmm1
+ vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
+ vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
+ vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
+diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
+index 5cdcdbd4d892..89789e8c80f6 100644
+--- a/arch/x86/include/asm/i8259.h
++++ b/arch/x86/include/asm/i8259.h
+@@ -3,6 +3,7 @@
+ #define _ASM_X86_I8259_H
+
+ #include <linux/delay.h>
++#include <asm/io.h>
+
+ extern unsigned int cached_irq_mask;
+
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index edfc64a8a154..d07addb99b71 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -648,10 +648,9 @@ void x86_spec_ctrl_setup_ap(void)
+ enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
+ #if IS_ENABLED(CONFIG_KVM_INTEL)
+ EXPORT_SYMBOL_GPL(l1tf_mitigation);
+-
++#endif
+ enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
+ EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
+-#endif
+
+ static void __init l1tf_select_mitigation(void)
+ {
+diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
+index c03c85e4fb6a..2bdb8e8a9d7c 100644
+--- a/arch/x86/mm/pgtable.c
++++ b/arch/x86/mm/pgtable.c
+@@ -712,28 +712,50 @@ int pmd_clear_huge(pmd_t *pmd)
+ return 0;
+ }
+
++#ifdef CONFIG_X86_64
+ /**
+ * pud_free_pmd_page - Clear pud entry and free pmd page.
+ * @pud: Pointer to a PUD.
++ * @addr: Virtual address associated with pud.
+ *
+- * Context: The pud range has been unmaped and TLB purged.
++ * Context: The pud range has been unmapped and TLB purged.
+ * Return: 1 if clearing the entry succeeded. 0 otherwise.
++ *
++ * NOTE: Callers must allow a single page allocation.
+ */
+-int pud_free_pmd_page(pud_t *pud)
++int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+ {
+- pmd_t *pmd;
++ pmd_t *pmd, *pmd_sv;
++ pte_t *pte;
+ int i;
+
+ if (pud_none(*pud))
+ return 1;
+
+ pmd = (pmd_t *)pud_page_vaddr(*pud);
++ pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
++ if (!pmd_sv)
++ return 0;
+
+- for (i = 0; i < PTRS_PER_PMD; i++)
+- if (!pmd_free_pte_page(&pmd[i]))
+- return 0;
++ for (i = 0; i < PTRS_PER_PMD; i++) {
++ pmd_sv[i] = pmd[i];
++ if (!pmd_none(pmd[i]))
++ pmd_clear(&pmd[i]);
++ }
+
+ pud_clear(pud);
++
++ /* INVLPG to clear all paging-structure caches */
++ flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
++
++ for (i = 0; i < PTRS_PER_PMD; i++) {
++ if (!pmd_none(pmd_sv[i])) {
++ pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
++ free_page((unsigned long)pte);
++ }
++ }
++
++ free_page((unsigned long)pmd_sv);
+ free_page((unsigned long)pmd);
+
+ return 1;
+@@ -742,11 +764,12 @@ int pud_free_pmd_page(pud_t *pud)
+ /**
+ * pmd_free_pte_page - Clear pmd entry and free pte page.
+ * @pmd: Pointer to a PMD.
++ * @addr: Virtual address associated with pmd.
+ *
+- * Context: The pmd range has been unmaped and TLB purged.
++ * Context: The pmd range has been unmapped and TLB purged.
+ * Return: 1 if clearing the entry succeeded. 0 otherwise.
+ */
+-int pmd_free_pte_page(pmd_t *pmd)
++int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+ {
+ pte_t *pte;
+
+@@ -755,8 +778,30 @@ int pmd_free_pte_page(pmd_t *pmd)
+
+ pte = (pte_t *)pmd_page_vaddr(*pmd);
+ pmd_clear(pmd);
++
++ /* INVLPG to clear all paging-structure caches */
++ flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
++
+ free_page((unsigned long)pte);
+
+ return 1;
+ }
++
++#else /* !CONFIG_X86_64 */
++
++int pud_free_pmd_page(pud_t *pud, unsigned long addr)
++{
++ return pud_none(*pud);
++}
++
++/*
++ * Disable free page handling on x86-PAE. This assures that ioremap()
++ * does not update sync'd pmd entries. See vmalloc_sync_one().
++ */
++int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
++{
++ return pmd_none(*pmd);
++}
++
++#endif /* CONFIG_X86_64 */
+ #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 4a4b7d3c909a..3b44bd28fc45 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -1203,6 +1203,24 @@ static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
+ return dur;
+ }
+
++/*
++ * Return the farthest future time instant according to jiffies
++ * macros.
++ */
++static unsigned long bfq_greatest_from_now(void)
++{
++ return jiffies + MAX_JIFFY_OFFSET;
++}
++
++/*
++ * Return the farthest past time instant according to jiffies
++ * macros.
++ */
++static unsigned long bfq_smallest_from_now(void)
++{
++ return jiffies - MAX_JIFFY_OFFSET;
++}
++
+ static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq,
+ unsigned int old_wr_coeff,
+@@ -1217,7 +1235,19 @@ static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
+ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
+ } else {
+- bfqq->wr_start_at_switch_to_srt = jiffies;
++ /*
++ * No interactive weight raising in progress
++ * here: assign minus infinity to
++ * wr_start_at_switch_to_srt, to make sure
++ * that, at the end of the soft-real-time
++ * weight raising periods that is starting
++ * now, no interactive weight-raising period
++ * may be wrongly considered as still in
++ * progress (and thus actually started by
++ * mistake).
++ */
++ bfqq->wr_start_at_switch_to_srt =
++ bfq_smallest_from_now();
+ bfqq->wr_coeff = bfqd->bfq_wr_coeff *
+ BFQ_SOFTRT_WEIGHT_FACTOR;
+ bfqq->wr_cur_max_time =
+@@ -2896,24 +2926,6 @@ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
+ jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
+ }
+
+-/*
+- * Return the farthest future time instant according to jiffies
+- * macros.
+- */
+-static unsigned long bfq_greatest_from_now(void)
+-{
+- return jiffies + MAX_JIFFY_OFFSET;
+-}
+-
+-/*
+- * Return the farthest past time instant according to jiffies
+- * macros.
+- */
+-static unsigned long bfq_smallest_from_now(void)
+-{
+- return jiffies - MAX_JIFFY_OFFSET;
+-}
+-
+ /**
+ * bfq_bfqq_expire - expire a queue.
+ * @bfqd: device owning the queue.
+diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
+index d880a4897159..4ee7c041bb82 100644
+--- a/crypto/ablkcipher.c
++++ b/crypto/ablkcipher.c
+@@ -71,11 +71,9 @@ static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
+ return max(start, end_page);
+ }
+
+-static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
+- unsigned int bsize)
++static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
++ unsigned int n)
+ {
+- unsigned int n = bsize;
+-
+ for (;;) {
+ unsigned int len_this_page = scatterwalk_pagelen(&walk->out);
+
+@@ -87,17 +85,13 @@ static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
+ n -= len_this_page;
+ scatterwalk_start(&walk->out, sg_next(walk->out.sg));
+ }
+-
+- return bsize;
+ }
+
+-static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
+- unsigned int n)
++static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
++ unsigned int n)
+ {
+ scatterwalk_advance(&walk->in, n);
+ scatterwalk_advance(&walk->out, n);
+-
+- return n;
+ }
+
+ static int ablkcipher_walk_next(struct ablkcipher_request *req,
+@@ -107,39 +101,40 @@ int ablkcipher_walk_done(struct ablkcipher_request *req,
+ struct ablkcipher_walk *walk, int err)
+ {
+ struct crypto_tfm *tfm = req->base.tfm;
+- unsigned int nbytes = 0;
++ unsigned int n; /* bytes processed */
++ bool more;
+
+- if (likely(err >= 0)) {
+- unsigned int n = walk->nbytes - err;
++ if (unlikely(err < 0))
++ goto finish;
+
+- if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
+- n = ablkcipher_done_fast(walk, n);
+- else if (WARN_ON(err)) {
+- err = -EINVAL;
+- goto err;
+- } else
+- n = ablkcipher_done_slow(walk, n);
++ n = walk->nbytes - err;
++ walk->total -= n;
++ more = (walk->total != 0);
+
+- nbytes = walk->total - n;
+- err = 0;
++ if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
++ ablkcipher_done_fast(walk, n);
++ } else {
++ if (WARN_ON(err)) {
++ /* unexpected case; didn't process all bytes */
++ err = -EINVAL;
++ goto finish;
++ }
++ ablkcipher_done_slow(walk, n);
+ }
+
+- scatterwalk_done(&walk->in, 0, nbytes);
+- scatterwalk_done(&walk->out, 1, nbytes);
+-
+-err:
+- walk->total = nbytes;
+- walk->nbytes = nbytes;
++ scatterwalk_done(&walk->in, 0, more);
++ scatterwalk_done(&walk->out, 1, more);
+
+- if (nbytes) {
++ if (more) {
+ crypto_yield(req->base.flags);
+ return ablkcipher_walk_next(req, walk);
+ }
+-
++ err = 0;
++finish:
++ walk->nbytes = 0;
+ if (walk->iv != req->info)
+ memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
+ kfree(walk->iv_buffer);
+-
+ return err;
+ }
+ EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
+diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
+index 6c43a0a17a55..d84c6920ada9 100644
+--- a/crypto/blkcipher.c
++++ b/crypto/blkcipher.c
+@@ -71,19 +71,18 @@ static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
+ return max(start, end_page);
+ }
+
+-static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk,
+- unsigned int bsize)
++static inline void blkcipher_done_slow(struct blkcipher_walk *walk,
++ unsigned int bsize)
+ {
+ u8 *addr;
+
+ addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
+ addr = blkcipher_get_spot(addr, bsize);
+ scatterwalk_copychunks(addr, &walk->out, bsize, 1);
+- return bsize;
+ }
+
+-static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
+- unsigned int n)
++static inline void blkcipher_done_fast(struct blkcipher_walk *walk,
++ unsigned int n)
+ {
+ if (walk->flags & BLKCIPHER_WALK_COPY) {
+ blkcipher_map_dst(walk);
+@@ -97,49 +96,48 @@ static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
+
+ scatterwalk_advance(&walk->in, n);
+ scatterwalk_advance(&walk->out, n);
+-
+- return n;
+ }
+
+ int blkcipher_walk_done(struct blkcipher_desc *desc,
+ struct blkcipher_walk *walk, int err)
+ {
+- unsigned int nbytes = 0;
++ unsigned int n; /* bytes processed */
++ bool more;
+
+- if (likely(err >= 0)) {
+- unsigned int n = walk->nbytes - err;
++ if (unlikely(err < 0))
++ goto finish;
+
+- if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
+- n = blkcipher_done_fast(walk, n);
+- else if (WARN_ON(err)) {
+- err = -EINVAL;
+- goto err;
+- } else
+- n = blkcipher_done_slow(walk, n);
++ n = walk->nbytes - err;
++ walk->total -= n;
++ more = (walk->total != 0);
+
+- nbytes = walk->total - n;
+- err = 0;
++ if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) {
++ blkcipher_done_fast(walk, n);
++ } else {
++ if (WARN_ON(err)) {
++ /* unexpected case; didn't process all bytes */
++ err = -EINVAL;
++ goto finish;
++ }
++ blkcipher_done_slow(walk, n);
+ }
+
+- scatterwalk_done(&walk->in, 0, nbytes);
+- scatterwalk_done(&walk->out, 1, nbytes);
++ scatterwalk_done(&walk->in, 0, more);
++ scatterwalk_done(&walk->out, 1, more);
+
+-err:
+- walk->total = nbytes;
+- walk->nbytes = nbytes;
+-
+- if (nbytes) {
++ if (more) {
+ crypto_yield(desc->flags);
+ return blkcipher_walk_next(desc, walk);
+ }
+-
++ err = 0;
++finish:
++ walk->nbytes = 0;
+ if (walk->iv != desc->info)
+ memcpy(desc->info, walk->iv, walk->ivsize);
+ if (walk->buffer != walk->page)
+ kfree(walk->buffer);
+ if (walk->page)
+ free_page((unsigned long)walk->page);
+-
+ return err;
+ }
+ EXPORT_SYMBOL_GPL(blkcipher_walk_done);
+diff --git a/crypto/skcipher.c b/crypto/skcipher.c
+index 11af5fd6a443..e319421a32e7 100644
+--- a/crypto/skcipher.c
++++ b/crypto/skcipher.c
+@@ -95,7 +95,7 @@ static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
+ return max(start, end_page);
+ }
+
+-static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
++static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
+ {
+ u8 *addr;
+
+@@ -103,23 +103,24 @@ static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
+ addr = skcipher_get_spot(addr, bsize);
+ scatterwalk_copychunks(addr, &walk->out, bsize,
+ (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
+- return 0;
+ }
+
+ int skcipher_walk_done(struct skcipher_walk *walk, int err)
+ {
+- unsigned int n = walk->nbytes - err;
+- unsigned int nbytes;
++ unsigned int n; /* bytes processed */
++ bool more;
++
++ if (unlikely(err < 0))
++ goto finish;
+
+- nbytes = walk->total - n;
++ n = walk->nbytes - err;
++ walk->total -= n;
++ more = (walk->total != 0);
+
+- if (unlikely(err < 0)) {
+- nbytes = 0;
+- n = 0;
+- } else if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
+- SKCIPHER_WALK_SLOW |
+- SKCIPHER_WALK_COPY |
+- SKCIPHER_WALK_DIFF)))) {
++ if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
++ SKCIPHER_WALK_SLOW |
++ SKCIPHER_WALK_COPY |
++ SKCIPHER_WALK_DIFF)))) {
+ unmap_src:
+ skcipher_unmap_src(walk);
+ } else if (walk->flags & SKCIPHER_WALK_DIFF) {
+@@ -131,28 +132,28 @@ unmap_src:
+ skcipher_unmap_dst(walk);
+ } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
+ if (WARN_ON(err)) {
++ /* unexpected case; didn't process all bytes */
+ err = -EINVAL;
+- nbytes = 0;
+- } else
+- n = skcipher_done_slow(walk, n);
++ goto finish;
++ }
++ skcipher_done_slow(walk, n);
++ goto already_advanced;
+ }
+
+- if (err > 0)
+- err = 0;
+-
+- walk->total = nbytes;
+- walk->nbytes = nbytes;
+-
+ scatterwalk_advance(&walk->in, n);
+ scatterwalk_advance(&walk->out, n);
+- scatterwalk_done(&walk->in, 0, nbytes);
+- scatterwalk_done(&walk->out, 1, nbytes);
++already_advanced:
++ scatterwalk_done(&walk->in, 0, more);
++ scatterwalk_done(&walk->out, 1, more);
+
+- if (nbytes) {
++ if (more) {
+ crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
+ CRYPTO_TFM_REQ_MAY_SLEEP : 0);
+ return skcipher_walk_next(walk);
+ }
++ err = 0;
++finish:
++ walk->nbytes = 0;
+
+ /* Short-circuit for the common/fast path. */
+ if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
+@@ -399,7 +400,7 @@ static int skcipher_copy_iv(struct skcipher_walk *walk)
+ unsigned size;
+ u8 *iv;
+
+- aligned_bs = ALIGN(bs, alignmask);
++ aligned_bs = ALIGN(bs, alignmask + 1);
+
+ /* Minimum size to align buffer by alignmask. */
+ size = alignmask & ~a;
+diff --git a/crypto/vmac.c b/crypto/vmac.c
+index df76a816cfb2..bb2fc787d615 100644
+--- a/crypto/vmac.c
++++ b/crypto/vmac.c
+@@ -1,6 +1,10 @@
+ /*
+- * Modified to interface to the Linux kernel
++ * VMAC: Message Authentication Code using Universal Hashing
++ *
++ * Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01
++ *
+ * Copyright (c) 2009, Intel Corporation.
++ * Copyright (c) 2018, Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+@@ -16,14 +20,15 @@
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+-/* --------------------------------------------------------------------------
+- * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
+- * This implementation is herby placed in the public domain.
+- * The authors offers no warranty. Use at your own risk.
+- * Please send bug reports to the authors.
+- * Last modified: 17 APR 08, 1700 PDT
+- * ----------------------------------------------------------------------- */
++/*
++ * Derived from:
++ * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
++ * This implementation is herby placed in the public domain.
++ * The authors offers no warranty. Use at your own risk.
++ * Last modified: 17 APR 08, 1700 PDT
++ */
+
++#include <asm/unaligned.h>
+ #include <linux/init.h>
+ #include <linux/types.h>
+ #include <linux/crypto.h>
+@@ -31,9 +36,35 @@
+ #include <linux/scatterlist.h>
+ #include <asm/byteorder.h>
+ #include <crypto/scatterwalk.h>
+-#include <crypto/vmac.h>
+ #include <crypto/internal/hash.h>
+
++/*
++ * User definable settings.
++ */
++#define VMAC_TAG_LEN 64
++#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */
++#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8)
++#define VMAC_NHBYTES 128/* Must 2^i for any 3 < i < 13 Standard = 128*/
++
++/* per-transform (per-key) context */
++struct vmac_tfm_ctx {
++ struct crypto_cipher *cipher;
++ u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
++ u64 polykey[2*VMAC_TAG_LEN/64];
++ u64 l3key[2*VMAC_TAG_LEN/64];
++};
++
++/* per-request context */
++struct vmac_desc_ctx {
++ union {
++ u8 partial[VMAC_NHBYTES]; /* partial block */
++ __le64 partial_words[VMAC_NHBYTES / 8];
++ };
++ unsigned int partial_size; /* size of the partial block */
++ bool first_block_processed;
++ u64 polytmp[2*VMAC_TAG_LEN/64]; /* running total of L2-hash */
++};
++
+ /*
+ * Constants and masks
+ */
+@@ -318,13 +349,6 @@ static void poly_step_func(u64 *ahi, u64 *alo,
+ } while (0)
+ #endif
+
+-static void vhash_abort(struct vmac_ctx *ctx)
+-{
+- ctx->polytmp[0] = ctx->polykey[0] ;
+- ctx->polytmp[1] = ctx->polykey[1] ;
+- ctx->first_block_processed = 0;
+-}
+-
+ static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
+ {
+ u64 rh, rl, t, z = 0;
+@@ -364,280 +388,209 @@ static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
+ return rl;
+ }
+
+-static void vhash_update(const unsigned char *m,
+- unsigned int mbytes, /* Pos multiple of VMAC_NHBYTES */
+- struct vmac_ctx *ctx)
++/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */
++static void vhash_blocks(const struct vmac_tfm_ctx *tctx,
++ struct vmac_desc_ctx *dctx,
++ const __le64 *mptr, unsigned int blocks)
+ {
+- u64 rh, rl, *mptr;
+- const u64 *kptr = (u64 *)ctx->nhkey;
+- int i;
+- u64 ch, cl;
+- u64 pkh = ctx->polykey[0];
+- u64 pkl = ctx->polykey[1];
+-
+- if (!mbytes)
+- return;
+-
+- BUG_ON(mbytes % VMAC_NHBYTES);
+-
+- mptr = (u64 *)m;
+- i = mbytes / VMAC_NHBYTES; /* Must be non-zero */
+-
+- ch = ctx->polytmp[0];
+- cl = ctx->polytmp[1];
+-
+- if (!ctx->first_block_processed) {
+- ctx->first_block_processed = 1;
++ const u64 *kptr = tctx->nhkey;
++ const u64 pkh = tctx->polykey[0];
++ const u64 pkl = tctx->polykey[1];
++ u64 ch = dctx->polytmp[0];
++ u64 cl = dctx->polytmp[1];
++ u64 rh, rl;
++
++ if (!dctx->first_block_processed) {
++ dctx->first_block_processed = true;
+ nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
+ rh &= m62;
+ ADD128(ch, cl, rh, rl);
+ mptr += (VMAC_NHBYTES/sizeof(u64));
+- i--;
++ blocks--;
+ }
+
+- while (i--) {
++ while (blocks--) {
+ nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
+ rh &= m62;
+ poly_step(ch, cl, pkh, pkl, rh, rl);
+ mptr += (VMAC_NHBYTES/sizeof(u64));
+ }
+
+- ctx->polytmp[0] = ch;
+- ctx->polytmp[1] = cl;
++ dctx->polytmp[0] = ch;
++ dctx->polytmp[1] = cl;
+ }
+
+-static u64 vhash(unsigned char m[], unsigned int mbytes,
+- u64 *tagl, struct vmac_ctx *ctx)
++static int vmac_setkey(struct crypto_shash *tfm,
++ const u8 *key, unsigned int keylen)
+ {
+- u64 rh, rl, *mptr;
+- const u64 *kptr = (u64 *)ctx->nhkey;
+- int i, remaining;
+- u64 ch, cl;
+- u64 pkh = ctx->polykey[0];
+- u64 pkl = ctx->polykey[1];
+-
+- mptr = (u64 *)m;
+- i = mbytes / VMAC_NHBYTES;
+- remaining = mbytes % VMAC_NHBYTES;
+-
+- if (ctx->first_block_processed) {
+- ch = ctx->polytmp[0];
+- cl = ctx->polytmp[1];
+- } else if (i) {
+- nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, ch, cl);
+- ch &= m62;
+- ADD128(ch, cl, pkh, pkl);
+- mptr += (VMAC_NHBYTES/sizeof(u64));
+- i--;
+- } else if (remaining) {
+- nh_16(mptr, kptr, 2*((remaining+15)/16), ch, cl);
+- ch &= m62;
+- ADD128(ch, cl, pkh, pkl);
+- mptr += (VMAC_NHBYTES/sizeof(u64));
+- goto do_l3;
+- } else {/* Empty String */
+- ch = pkh; cl = pkl;
+- goto do_l3;
+- }
+-
+- while (i--) {
+- nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
+- rh &= m62;
+- poly_step(ch, cl, pkh, pkl, rh, rl);
+- mptr += (VMAC_NHBYTES/sizeof(u64));
+- }
+- if (remaining) {
+- nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl);
+- rh &= m62;
+- poly_step(ch, cl, pkh, pkl, rh, rl);
+- }
+-
+-do_l3:
+- vhash_abort(ctx);
+- remaining *= 8;
+- return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining);
+-}
++ struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm);
++ __be64 out[2];
++ u8 in[16] = { 0 };
++ unsigned int i;
++ int err;
+
+-static u64 vmac(unsigned char m[], unsigned int mbytes,
+- const unsigned char n[16], u64 *tagl,
+- struct vmac_ctx_t *ctx)
+-{
+- u64 *in_n, *out_p;
+- u64 p, h;
+- int i;
+-
+- in_n = ctx->__vmac_ctx.cached_nonce;
+- out_p = ctx->__vmac_ctx.cached_aes;
+-
+- i = n[15] & 1;
+- if ((*(u64 *)(n+8) != in_n[1]) || (*(u64 *)(n) != in_n[0])) {
+- in_n[0] = *(u64 *)(n);
+- in_n[1] = *(u64 *)(n+8);
+- ((unsigned char *)in_n)[15] &= 0xFE;
+- crypto_cipher_encrypt_one(ctx->child,
+- (unsigned char *)out_p, (unsigned char *)in_n);
+-
+- ((unsigned char *)in_n)[15] |= (unsigned char)(1-i);
++ if (keylen != VMAC_KEY_LEN) {
++ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
++ return -EINVAL;
+ }
+- p = be64_to_cpup(out_p + i);
+- h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
+- return le64_to_cpu(p + h);
+-}
+
+-static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx)
+-{
+- u64 in[2] = {0}, out[2];
+- unsigned i;
+- int err = 0;
+-
+- err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN);
++ err = crypto_cipher_setkey(tctx->cipher, key, keylen);
+ if (err)
+ return err;
+
+ /* Fill nh key */
+- ((unsigned char *)in)[0] = 0x80;
+- for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) {
+- crypto_cipher_encrypt_one(ctx->child,
+- (unsigned char *)out, (unsigned char *)in);
+- ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out);
+- ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1);
+- ((unsigned char *)in)[15] += 1;
++ in[0] = 0x80;
++ for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) {
++ crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
++ tctx->nhkey[i] = be64_to_cpu(out[0]);
++ tctx->nhkey[i+1] = be64_to_cpu(out[1]);
++ in[15]++;
+ }
+
+ /* Fill poly key */
+- ((unsigned char *)in)[0] = 0xC0;
+- in[1] = 0;
+- for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) {
+- crypto_cipher_encrypt_one(ctx->child,
+- (unsigned char *)out, (unsigned char *)in);
+- ctx->__vmac_ctx.polytmp[i] =
+- ctx->__vmac_ctx.polykey[i] =
+- be64_to_cpup(out) & mpoly;
+- ctx->__vmac_ctx.polytmp[i+1] =
+- ctx->__vmac_ctx.polykey[i+1] =
+- be64_to_cpup(out+1) & mpoly;
+- ((unsigned char *)in)[15] += 1;
++ in[0] = 0xC0;
++ in[15] = 0;
++ for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) {
++ crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
++ tctx->polykey[i] = be64_to_cpu(out[0]) & mpoly;
++ tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly;
++ in[15]++;
+ }
+
+ /* Fill ip key */
+- ((unsigned char *)in)[0] = 0xE0;
+- in[1] = 0;
+- for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) {
++ in[0] = 0xE0;
++ in[15] = 0;
++ for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) {
+ do {
+- crypto_cipher_encrypt_one(ctx->child,
+- (unsigned char *)out, (unsigned char *)in);
+- ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out);
+- ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1);
+- ((unsigned char *)in)[15] += 1;
+- } while (ctx->__vmac_ctx.l3key[i] >= p64
+- || ctx->__vmac_ctx.l3key[i+1] >= p64);
++ crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
++ tctx->l3key[i] = be64_to_cpu(out[0]);
++ tctx->l3key[i+1] = be64_to_cpu(out[1]);
++ in[15]++;
++ } while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64);
+ }
+
+- /* Invalidate nonce/aes cache and reset other elements */
+- ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */
+- ctx->__vmac_ctx.cached_nonce[1] = (u64)0; /* Ensure illegal nonce */
+- ctx->__vmac_ctx.first_block_processed = 0;
+-
+- return err;
++ return 0;
+ }
+
+-static int vmac_setkey(struct crypto_shash *parent,
+- const u8 *key, unsigned int keylen)
++static int vmac_init(struct shash_desc *desc)
+ {
+- struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
++ const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
++ struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
+
+- if (keylen != VMAC_KEY_LEN) {
+- crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN);
+- return -EINVAL;
+- }
+-
+- return vmac_set_key((u8 *)key, ctx);
+-}
+-
+-static int vmac_init(struct shash_desc *pdesc)
+-{
++ dctx->partial_size = 0;
++ dctx->first_block_processed = false;
++ memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp));
+ return 0;
+ }
+
+-static int vmac_update(struct shash_desc *pdesc, const u8 *p,
+- unsigned int len)
++static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
+ {
+- struct crypto_shash *parent = pdesc->tfm;
+- struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
+- int expand;
+- int min;
+-
+- expand = VMAC_NHBYTES - ctx->partial_size > 0 ?
+- VMAC_NHBYTES - ctx->partial_size : 0;
+-
+- min = len < expand ? len : expand;
+-
+- memcpy(ctx->partial + ctx->partial_size, p, min);
+- ctx->partial_size += min;
+-
+- if (len < expand)
+- return 0;
+-
+- vhash_update(ctx->partial, VMAC_NHBYTES, &ctx->__vmac_ctx);
+- ctx->partial_size = 0;
+-
+- len -= expand;
+- p += expand;
++ const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
++ struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
++ unsigned int n;
++
++ if (dctx->partial_size) {
++ n = min(len, VMAC_NHBYTES - dctx->partial_size);
++ memcpy(&dctx->partial[dctx->partial_size], p, n);
++ dctx->partial_size += n;
++ p += n;
++ len -= n;
++ if (dctx->partial_size == VMAC_NHBYTES) {
++ vhash_blocks(tctx, dctx, dctx->partial_words, 1);
++ dctx->partial_size = 0;
++ }
++ }
+
+- if (len % VMAC_NHBYTES) {
+- memcpy(ctx->partial, p + len - (len % VMAC_NHBYTES),
+- len % VMAC_NHBYTES);
+- ctx->partial_size = len % VMAC_NHBYTES;
++ if (len >= VMAC_NHBYTES) {
++ n = round_down(len, VMAC_NHBYTES);
++ /* TODO: 'p' may be misaligned here */
++ vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES);
++ p += n;
++ len -= n;
+ }
+
+- vhash_update(p, len - len % VMAC_NHBYTES, &ctx->__vmac_ctx);
++ if (len) {
++ memcpy(dctx->partial, p, len);
++ dctx->partial_size = len;
++ }
+
+ return 0;
+ }
+
+-static int vmac_final(struct shash_desc *pdesc, u8 *out)
++static u64 vhash_final(const struct vmac_tfm_ctx *tctx,
++ struct vmac_desc_ctx *dctx)
+ {
+- struct crypto_shash *parent = pdesc->tfm;
+- struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
+- vmac_t mac;
+- u8 nonce[16] = {};
+-
+- /* vmac() ends up accessing outside the array bounds that
+- * we specify. In appears to access up to the next 2-word
+- * boundary. We'll just be uber cautious and zero the
+- * unwritten bytes in the buffer.
+- */
+- if (ctx->partial_size) {
+- memset(ctx->partial + ctx->partial_size, 0,
+- VMAC_NHBYTES - ctx->partial_size);
++ unsigned int partial = dctx->partial_size;
++ u64 ch = dctx->polytmp[0];
++ u64 cl = dctx->polytmp[1];
++
++ /* L1 and L2-hash the final block if needed */
++ if (partial) {
++ /* Zero-pad to next 128-bit boundary */
++ unsigned int n = round_up(partial, 16);
++ u64 rh, rl;
++
++ memset(&dctx->partial[partial], 0, n - partial);
++ nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl);
++ rh &= m62;
++ if (dctx->first_block_processed)
++ poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1],
++ rh, rl);
++ else
++ ADD128(ch, cl, rh, rl);
+ }
+- mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx);
+- memcpy(out, &mac, sizeof(vmac_t));
+- memzero_explicit(&mac, sizeof(vmac_t));
+- memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
+- ctx->partial_size = 0;
++
++ /* L3-hash the 128-bit output of L2-hash */
++ return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8);
++}
++
++static int vmac_final(struct shash_desc *desc, u8 *out)
++{
++ const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
++ struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
++ static const u8 nonce[16] = {}; /* TODO: this is insecure */
++ union {
++ u8 bytes[16];
++ __be64 pads[2];
++ } block;
++ int index;
++ u64 hash, pad;
++
++ /* Finish calculating the VHASH of the message */
++ hash = vhash_final(tctx, dctx);
++
++ /* Generate pseudorandom pad by encrypting the nonce */
++ memcpy(&block, nonce, 16);
++ index = block.bytes[15] & 1;
++ block.bytes[15] &= ~1;
++ crypto_cipher_encrypt_one(tctx->cipher, block.bytes, block.bytes);
++ pad = be64_to_cpu(block.pads[index]);
++
++ /* The VMAC is the sum of VHASH and the pseudorandom pad */
++ put_unaligned_le64(hash + pad, out);
+ return 0;
+ }
+
+ static int vmac_init_tfm(struct crypto_tfm *tfm)
+ {
+- struct crypto_cipher *cipher;
+- struct crypto_instance *inst = (void *)tfm->__crt_alg;
++ struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
+ struct crypto_spawn *spawn = crypto_instance_ctx(inst);
+- struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
++ struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
++ struct crypto_cipher *cipher;
+
+ cipher = crypto_spawn_cipher(spawn);
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
+
+- ctx->child = cipher;
++ tctx->cipher = cipher;
+ return 0;
+ }
+
+ static void vmac_exit_tfm(struct crypto_tfm *tfm)
+ {
+- struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
+- crypto_free_cipher(ctx->child);
++ struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
++
++ crypto_free_cipher(tctx->cipher);
+ }
+
+ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
+@@ -655,6 +608,10 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
+ if (IS_ERR(alg))
+ return PTR_ERR(alg);
+
++ err = -EINVAL;
++ if (alg->cra_blocksize != 16)
++ goto out_put_alg;
++
+ inst = shash_alloc_instance("vmac", alg);
+ err = PTR_ERR(inst);
+ if (IS_ERR(inst))
+@@ -670,11 +627,12 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
+ inst->alg.base.cra_blocksize = alg->cra_blocksize;
+ inst->alg.base.cra_alignmask = alg->cra_alignmask;
+
+- inst->alg.digestsize = sizeof(vmac_t);
+- inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t);
++ inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx);
+ inst->alg.base.cra_init = vmac_init_tfm;
+ inst->alg.base.cra_exit = vmac_exit_tfm;
+
++ inst->alg.descsize = sizeof(struct vmac_desc_ctx);
++ inst->alg.digestsize = VMAC_TAG_LEN / 8;
+ inst->alg.init = vmac_init;
+ inst->alg.update = vmac_update;
+ inst->alg.final = vmac_final;
+diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
+index 572b6c7303ed..f14695e744d0 100644
+--- a/drivers/acpi/apei/ghes.c
++++ b/drivers/acpi/apei/ghes.c
+@@ -114,19 +114,7 @@ static DEFINE_MUTEX(ghes_list_mutex);
+ * from BIOS to Linux can be determined only in NMI, IRQ or timer
+ * handler, but general ioremap can not be used in atomic context, so
+ * the fixmap is used instead.
+- */
+-
+-/*
+- * Two virtual pages are used, one for IRQ/PROCESS context, the other for
+- * NMI context (optionally).
+- */
+-#define GHES_IOREMAP_PAGES 2
+-#define GHES_IOREMAP_IRQ_PAGE(base) (base)
+-#define GHES_IOREMAP_NMI_PAGE(base) ((base) + PAGE_SIZE)
+-
+-/* virtual memory area for atomic ioremap */
+-static struct vm_struct *ghes_ioremap_area;
+-/*
++ *
+ * These 2 spinlocks are used to prevent the fixmap entries from being used
+ * simultaneously.
+ */
+@@ -141,23 +129,6 @@ static atomic_t ghes_estatus_cache_alloced;
+
+ static int ghes_panic_timeout __read_mostly = 30;
+
+-static int ghes_ioremap_init(void)
+-{
+- ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES,
+- VM_IOREMAP, VMALLOC_START, VMALLOC_END);
+- if (!ghes_ioremap_area) {
+- pr_err(GHES_PFX "Failed to allocate virtual memory area for atomic ioremap.\n");
+- return -ENOMEM;
+- }
+-
+- return 0;
+-}
+-
+-static void ghes_ioremap_exit(void)
+-{
+- free_vm_area(ghes_ioremap_area);
+-}
+-
+ static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn)
+ {
+ phys_addr_t paddr;
+@@ -1247,13 +1218,9 @@ static int __init ghes_init(void)
+
+ ghes_nmi_init_cxt();
+
+- rc = ghes_ioremap_init();
+- if (rc)
+- goto err;
+-
+ rc = ghes_estatus_pool_init();
+ if (rc)
+- goto err_ioremap_exit;
++ goto err;
+
+ rc = ghes_estatus_pool_expand(GHES_ESTATUS_CACHE_AVG_SIZE *
+ GHES_ESTATUS_CACHE_ALLOCED_MAX);
+@@ -1277,8 +1244,6 @@ static int __init ghes_init(void)
+ return 0;
+ err_pool_exit:
+ ghes_estatus_pool_exit();
+-err_ioremap_exit:
+- ghes_ioremap_exit();
+ err:
+ return rc;
+ }
+diff --git a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
+index 286b0049b7b6..a48fde191c0a 100644
+--- a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
++++ b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
+@@ -223,7 +223,7 @@ static struct ccu_mux cpu_clk = {
+ .hw.init = CLK_HW_INIT_PARENTS("cpu",
+ cpu_parents,
+ &ccu_mux_ops,
+- CLK_IS_CRITICAL),
++ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
+ }
+ };
+
+diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
+index 0e2011636fbb..c53c7ac992f8 100644
+--- a/drivers/firmware/qemu_fw_cfg.c
++++ b/drivers/firmware/qemu_fw_cfg.c
+@@ -582,9 +582,10 @@ static int fw_cfg_sysfs_remove(struct platform_device *pdev)
+ {
+ pr_debug("fw_cfg: unloading.\n");
+ fw_cfg_sysfs_cache_cleanup();
++ sysfs_remove_file(fw_cfg_top_ko, &fw_cfg_rev_attr.attr);
++ fw_cfg_io_cleanup();
+ fw_cfg_kset_unregister_recursive(fw_cfg_fname_kset);
+ fw_cfg_kobj_cleanup(fw_cfg_sel_ko);
+- fw_cfg_io_cleanup();
+ return 0;
+ }
+
+diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
+index 46a2f5d9aa25..f00421dfacbd 100644
+--- a/include/asm-generic/pgtable.h
++++ b/include/asm-generic/pgtable.h
+@@ -991,8 +991,8 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
+ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
+ int pud_clear_huge(pud_t *pud);
+ int pmd_clear_huge(pmd_t *pmd);
+-int pud_free_pmd_page(pud_t *pud);
+-int pmd_free_pte_page(pmd_t *pmd);
++int pud_free_pmd_page(pud_t *pud, unsigned long addr);
++int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
+ #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
+ static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
+ {
+@@ -1018,11 +1018,11 @@ static inline int pmd_clear_huge(pmd_t *pmd)
+ {
+ return 0;
+ }
+-static inline int pud_free_pmd_page(pud_t *pud)
++static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+ {
+ return 0;
+ }
+-static inline int pmd_free_pte_page(pmd_t *pmd)
++static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+ {
+ return 0;
+ }
+diff --git a/include/crypto/vmac.h b/include/crypto/vmac.h
+deleted file mode 100644
+index 6b700c7b2fe1..000000000000
+--- a/include/crypto/vmac.h
++++ /dev/null
+@@ -1,63 +0,0 @@
+-/*
+- * Modified to interface to the Linux kernel
+- * Copyright (c) 2009, Intel Corporation.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+- * Place - Suite 330, Boston, MA 02111-1307 USA.
+- */
+-
+-#ifndef __CRYPTO_VMAC_H
+-#define __CRYPTO_VMAC_H
+-
+-/* --------------------------------------------------------------------------
+- * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
+- * This implementation is herby placed in the public domain.
+- * The authors offers no warranty. Use at your own risk.
+- * Please send bug reports to the authors.
+- * Last modified: 17 APR 08, 1700 PDT
+- * ----------------------------------------------------------------------- */
+-
+-/*
+- * User definable settings.
+- */
+-#define VMAC_TAG_LEN 64
+-#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */
+-#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8)
+-#define VMAC_NHBYTES 128/* Must 2^i for any 3 < i < 13 Standard = 128*/
+-
+-/*
+- * This implementation uses u32 and u64 as names for unsigned 32-
+- * and 64-bit integer types. These are defined in C99 stdint.h. The
+- * following may need adaptation if you are not running a C99 or
+- * Microsoft C environment.
+- */
+-struct vmac_ctx {
+- u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
+- u64 polykey[2*VMAC_TAG_LEN/64];
+- u64 l3key[2*VMAC_TAG_LEN/64];
+- u64 polytmp[2*VMAC_TAG_LEN/64];
+- u64 cached_nonce[2];
+- u64 cached_aes[2];
+- int first_block_processed;
+-};
+-
+-typedef u64 vmac_t;
+-
+-struct vmac_ctx_t {
+- struct crypto_cipher *child;
+- struct vmac_ctx __vmac_ctx;
+- u8 partial[VMAC_NHBYTES]; /* partial block */
+- int partial_size; /* size of the partial block */
+-};
+-
+-#endif /* __CRYPTO_VMAC_H */
+diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
+index d447f24df970..0812cd5408c9 100644
+--- a/include/trace/events/sched.h
++++ b/include/trace/events/sched.h
+@@ -116,9 +116,9 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
+ * RUNNING (we will not have dequeued if state != RUNNING).
+ */
+ if (preempt)
+- return TASK_STATE_MAX;
++ return TASK_REPORT_MAX;
+
+- return __get_task_state(p);
++ return 1 << __get_task_state(p);
+ }
+ #endif /* CREATE_TRACE_POINTS */
+
+@@ -164,7 +164,7 @@ TRACE_EVENT(sched_switch,
+ { 0x40, "P" }, { 0x80, "I" }) :
+ "R",
+
+- __entry->prev_state & TASK_STATE_MAX ? "+" : "",
++ __entry->prev_state & TASK_REPORT_MAX ? "+" : "",
+ __entry->next_comm, __entry->next_pid, __entry->next_prio)
+ );
+
+diff --git a/lib/ioremap.c b/lib/ioremap.c
+index 54e5bbaa3200..517f5853ffed 100644
+--- a/lib/ioremap.c
++++ b/lib/ioremap.c
+@@ -92,7 +92,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
+ if (ioremap_pmd_enabled() &&
+ ((next - addr) == PMD_SIZE) &&
+ IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
+- pmd_free_pte_page(pmd)) {
++ pmd_free_pte_page(pmd, addr)) {
+ if (pmd_set_huge(pmd, phys_addr + addr, prot))
+ continue;
+ }
+@@ -119,7 +119,7 @@ static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
+ if (ioremap_pud_enabled() &&
+ ((next - addr) == PUD_SIZE) &&
+ IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
+- pud_free_pmd_page(pud)) {
++ pud_free_pmd_page(pud, addr)) {
+ if (pud_set_huge(pud, phys_addr + addr, prot))
+ continue;
+ }
+diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
+index 8112893037bd..cef3754408d4 100644
+--- a/net/bluetooth/hidp/core.c
++++ b/net/bluetooth/hidp/core.c
+@@ -431,8 +431,8 @@ static void hidp_del_timer(struct hidp_session *session)
+ del_timer(&session->timer);
+ }
+
+-static void hidp_process_report(struct hidp_session *session,
+- int type, const u8 *data, int len, int intr)
++static void hidp_process_report(struct hidp_session *session, int type,
++ const u8 *data, unsigned int len, int intr)
+ {
+ if (len > HID_MAX_BUFFER_SIZE)
+ len = HID_MAX_BUFFER_SIZE;
+diff --git a/scripts/depmod.sh b/scripts/depmod.sh
+index 9831cca31240..f41b0a4b575c 100755
+--- a/scripts/depmod.sh
++++ b/scripts/depmod.sh
+@@ -11,10 +11,16 @@ DEPMOD=$1
+ KERNELRELEASE=$2
+ SYMBOL_PREFIX=$3
+
+-if ! test -r System.map -a -x "$DEPMOD"; then
++if ! test -r System.map ; then
+ exit 0
+ fi
+
++if [ -z $(command -v $DEPMOD) ]; then
++ echo "'make modules_install' requires $DEPMOD. Please install it." >&2
++ echo "This is probably in the kmod package." >&2
++ exit 1
++fi
++
+ # older versions of depmod don't support -P <symbol-prefix>
+ # support was added in module-init-tools 3.13
+ if test -n "$SYMBOL_PREFIX"; then
+diff --git a/sound/soc/codecs/msm8916-wcd-digital.c b/sound/soc/codecs/msm8916-wcd-digital.c
+index 694db27b11fa..13354d6304a8 100644
+--- a/sound/soc/codecs/msm8916-wcd-digital.c
++++ b/sound/soc/codecs/msm8916-wcd-digital.c
+@@ -238,7 +238,7 @@ static const struct soc_enum rx_mix2_inp1_chain_enum = SOC_ENUM_SINGLE(
+ static const struct soc_enum rx2_mix1_inp_enum[] = {
+ SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX2_B1_CTL, 0, 6, rx_mix1_text),
+ SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX2_B1_CTL, 3, 6, rx_mix1_text),
+- SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX2_B1_CTL, 0, 6, rx_mix1_text),
++ SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX2_B2_CTL, 0, 6, rx_mix1_text),
+ };
+
+ /* RX2 MIX2 */
+@@ -249,7 +249,7 @@ static const struct soc_enum rx2_mix2_inp1_chain_enum = SOC_ENUM_SINGLE(
+ static const struct soc_enum rx3_mix1_inp_enum[] = {
+ SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX3_B1_CTL, 0, 6, rx_mix1_text),
+ SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX3_B1_CTL, 3, 6, rx_mix1_text),
+- SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX3_B1_CTL, 0, 6, rx_mix1_text),
++ SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX3_B2_CTL, 0, 6, rx_mix1_text),
+ };
+
+ /* DEC */
+diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
+index 20755ecc7f9e..a02dec251afe 100644
+--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
++++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
+@@ -116,23 +116,19 @@ static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
+ struct cht_mc_private *ctx = snd_soc_card_get_drvdata(runtime->card);
+ struct snd_soc_jack *jack = &ctx->jack;
+
+- /**
+- * TI supports 4 butons headset detection
+- * KEY_MEDIA
+- * KEY_VOICECOMMAND
+- * KEY_VOLUMEUP
+- * KEY_VOLUMEDOWN
+- */
+- if (ctx->ts3a227e_present)
+- jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
+- SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+- SND_JACK_BTN_2 | SND_JACK_BTN_3;
+- else
+- jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE;
++ if (ctx->ts3a227e_present) {
++ /*
++ * The jack has already been created in the
++ * cht_max98090_headset_init() function.
++ */
++ snd_soc_jack_notifier_register(jack, &cht_jack_nb);
++ return 0;
++ }
++
++ jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE;
+
+ ret = snd_soc_card_jack_new(runtime->card, "Headset Jack",
+ jack_type, jack, NULL, 0);
+-
+ if (ret) {
+ dev_err(runtime->dev, "Headset Jack creation failed %d\n", ret);
+ return ret;
+@@ -188,6 +184,27 @@ static int cht_max98090_headset_init(struct snd_soc_component *component)
+ {
+ struct snd_soc_card *card = component->card;
+ struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card);
++ struct snd_soc_jack *jack = &ctx->jack;
++ int jack_type;
++ int ret;
++
++ /*
++ * TI supports 4 butons headset detection
++ * KEY_MEDIA
++ * KEY_VOICECOMMAND
++ * KEY_VOLUMEUP
++ * KEY_VOLUMEDOWN
++ */
++ jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
++ SND_JACK_BTN_0 | SND_JACK_BTN_1 |
++ SND_JACK_BTN_2 | SND_JACK_BTN_3;
++
++ ret = snd_soc_card_jack_new(card, "Headset Jack", jack_type,
++ jack, NULL, 0);
++ if (ret) {
++ dev_err(card->dev, "Headset Jack creation failed %d\n", ret);
++ return ret;
++ }
+
+ return ts3a227e_enable_jack_detect(component, &ctx->jack);
+ }
+diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
+index 2684a2ba33cd..e28edb1f7263 100644
+--- a/sound/soc/sh/rcar/adg.c
++++ b/sound/soc/sh/rcar/adg.c
+@@ -479,10 +479,10 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
+ }
+
+ if (req_rate[0] % 48000 == 0)
+- adg->flags = AUDIO_OUT_48;
++ adg->flags |= AUDIO_OUT_48;
+
+ if (of_get_property(np, "clkout-lr-asynchronous", NULL))
+- adg->flags = LRCLK_ASYNC;
++ adg->flags |= LRCLK_ASYNC;
+
+ /*
+ * This driver is assuming that AUDIO_CLKA/AUDIO_CLKB/AUDIO_CLKC