author    Mike Pagano <mpagano@gentoo.org>  2018-02-03 16:21:20 -0500
committer Mike Pagano <mpagano@gentoo.org>  2018-02-03 16:21:20 -0500
commit    aec749e76d0ff102281e67c557ca37523aeaaaec (patch)
tree      4dc233cee55adc260079a1a05641180bce94286c
parent    linux kernel 4.14.16 (diff)
download  linux-patches-aec749e7.tar.gz
          linux-patches-aec749e7.tar.bz2
          linux-patches-aec749e7.zip
Linux patch 4.14.17 (4.14-22)

-rw-r--r--  0000_README              |    8
-rw-r--r--  1016_linux-4.14.17.patch | 5960
2 files changed, 5968 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 8311794b..a02a4f50 100644
--- a/0000_README
+++ b/0000_README
@@ -107,6 +107,14 @@ Patch:  1015_linux-4.14.16.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.14.16
 
+Patch:  1016_linux-4.14.17.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.14.17
+
+Patch:  1017_linux-4.14.18.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.14.18
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.
diff --git a/1016_linux-4.14.17.patch b/1016_linux-4.14.17.patch
new file mode 100644
index 00000000..8d3dee38
--- /dev/null
+++ b/1016_linux-4.14.17.patch
@@ -0,0 +1,5960 @@
+diff --git a/Makefile b/Makefile
+index 90a4bffa8446..7ed993896dd5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 16
++SUBLEVEL = 17
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
+index dff66974feed..d5f5e92e7488 100644
+--- a/arch/arm/boot/dts/bcm-nsp.dtsi
++++ b/arch/arm/boot/dts/bcm-nsp.dtsi
+@@ -85,7 +85,7 @@
+ timer@20200 {
+ compatible = "arm,cortex-a9-global-timer";
+ reg = <0x20200 0x100>;
+- interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&periph_clk>;
+ };
+
+@@ -93,7 +93,7 @@
+ compatible = "arm,cortex-a9-twd-timer";
+ reg = <0x20600 0x20>;
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
+- IRQ_TYPE_LEVEL_HIGH)>;
++ IRQ_TYPE_EDGE_RISING)>;
+ clocks = <&periph_clk>;
+ };
+
+diff --git a/arch/arm/boot/dts/bcm958623hr.dts b/arch/arm/boot/dts/bcm958623hr.dts
+index 3bc50849d013..b8bde13de90a 100644
+--- a/arch/arm/boot/dts/bcm958623hr.dts
++++ b/arch/arm/boot/dts/bcm958623hr.dts
+@@ -141,10 +141,6 @@
+ status = "okay";
+ };
+
+-&sata {
+- status = "okay";
+-};
+-
+ &qspi {
+ bspi-sel = <0>;
+ flash: m25p80@0 {
+diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts
+index d94d14b3c745..6a44b8021702 100644
+--- a/arch/arm/boot/dts/bcm958625hr.dts
++++ b/arch/arm/boot/dts/bcm958625hr.dts
+@@ -177,10 +177,6 @@
+ status = "okay";
+ };
+
+-&sata {
+- status = "okay";
+-};
+-
+ &srab {
+ compatible = "brcm,bcm58625-srab", "brcm,nsp-srab";
+ status = "okay";
+diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
+index d535edc01434..75fdeaa8c62f 100644
+--- a/arch/mips/kvm/mips.c
++++ b/arch/mips/kvm/mips.c
+@@ -445,10 +445,8 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ {
+ int r = -EINTR;
+- sigset_t sigsaved;
+
+- if (vcpu->sigset_active)
+- sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
++ kvm_sigset_activate(vcpu);
+
+ if (vcpu->mmio_needed) {
+ if (!vcpu->mmio_is_write)
+@@ -480,8 +478,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ local_irq_enable();
+
+ out:
+- if (vcpu->sigset_active)
+- sigprocmask(SIG_SETMASK, &sigsaved, NULL);
++ kvm_sigset_deactivate(vcpu);
+
+ return r;
+ }
+diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
+index ee279c7f4802..2b02d51d14d8 100644
+--- a/arch/powerpc/kvm/powerpc.c
++++ b/arch/powerpc/kvm/powerpc.c
+@@ -1407,7 +1407,6 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ {
+ int r;
+- sigset_t sigsaved;
+
+ if (vcpu->mmio_needed) {
+ vcpu->mmio_needed = 0;
+@@ -1448,16 +1447,14 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ #endif
+ }
+
+- if (vcpu->sigset_active)
+- sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
++ kvm_sigset_activate(vcpu);
+
+ if (run->immediate_exit)
+ r = -EINTR;
+ else
+ r = kvmppc_vcpu_run(run, vcpu);
+
+- if (vcpu->sigset_active)
+- sigprocmask(SIG_SETMASK, &sigsaved, NULL);
++ kvm_sigset_deactivate(vcpu);
+
+ return r;
+ }
+diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
+index 43607bb12cc2..a6cc744ff5fb 100644
+--- a/arch/s390/include/asm/mmu_context.h
++++ b/arch/s390/include/asm/mmu_context.h
+@@ -28,7 +28,7 @@ static inline int init_new_context(struct task_struct *tsk,
+ #ifdef CONFIG_PGSTE
+ mm->context.alloc_pgste = page_table_allocate_pgste ||
+ test_thread_flag(TIF_PGSTE) ||
+- current->mm->context.alloc_pgste;
++ (current->mm && current->mm->context.alloc_pgste);
+ mm->context.has_pgste = 0;
+ mm->context.use_skey = 0;
+ mm->context.use_cmma = 0;
+diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
+index 55de4eb73604..de0a8b17bcaa 100644
+--- a/arch/s390/include/asm/topology.h
++++ b/arch/s390/include/asm/topology.h
+@@ -51,6 +51,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu);
+ static inline void topology_init_early(void) { }
+ static inline void topology_schedule_update(void) { }
+ static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
++static inline int topology_cpu_dedicated(int cpu_nr) { return 0; }
+ static inline void topology_expect_change(void) { }
+
+ #endif /* CONFIG_SCHED_TOPOLOGY */
+diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
+index 092c4154abd7..7ffaf9fd6d19 100644
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -54,6 +54,7 @@
+ #include <asm/sigp.h>
+ #include <asm/idle.h>
+ #include <asm/nmi.h>
++#include <asm/topology.h>
+ #include "entry.h"
+
+ enum {
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 6c88cb18ace2..6e3d80b2048e 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -3378,7 +3378,6 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+ {
+ int rc;
+- sigset_t sigsaved;
+
+ if (kvm_run->immediate_exit)
+ return -EINTR;
+@@ -3388,8 +3387,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+ return 0;
+ }
+
+- if (vcpu->sigset_active)
+- sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
++ kvm_sigset_activate(vcpu);
+
+ if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
+ kvm_s390_vcpu_start(vcpu);
+@@ -3423,8 +3421,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+ disable_cpu_timer_accounting(vcpu);
+ store_regs(vcpu, kvm_run);
+
+- if (vcpu->sigset_active)
+- sigprocmask(SIG_SETMASK, &sigsaved, NULL);
++ kvm_sigset_deactivate(vcpu);
+
+ vcpu->stat.exit_userspace++;
+ return rc;
+diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
+index 3d09e3aca18d..12e8484a8ee7 100644
+--- a/arch/x86/crypto/aesni-intel_asm.S
++++ b/arch/x86/crypto/aesni-intel_asm.S
+@@ -90,30 +90,6 @@ SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100
+ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
+ .octa 0x00000000000000000000000000000000
+
+-.section .rodata
+-.align 16
+-.type aad_shift_arr, @object
+-.size aad_shift_arr, 272
+-aad_shift_arr:
+- .octa 0xffffffffffffffffffffffffffffffff
+- .octa 0xffffffffffffffffffffffffffffff0C
+- .octa 0xffffffffffffffffffffffffffff0D0C
+- .octa 0xffffffffffffffffffffffffff0E0D0C
+- .octa 0xffffffffffffffffffffffff0F0E0D0C
+- .octa 0xffffffffffffffffffffff0C0B0A0908
+- .octa 0xffffffffffffffffffff0D0C0B0A0908
+- .octa 0xffffffffffffffffff0E0D0C0B0A0908
+- .octa 0xffffffffffffffff0F0E0D0C0B0A0908
+- .octa 0xffffffffffffff0C0B0A090807060504
+- .octa 0xffffffffffff0D0C0B0A090807060504
+- .octa 0xffffffffff0E0D0C0B0A090807060504
+- .octa 0xffffffff0F0E0D0C0B0A090807060504
+- .octa 0xffffff0C0B0A09080706050403020100
+- .octa 0xffff0D0C0B0A09080706050403020100
+- .octa 0xff0E0D0C0B0A09080706050403020100
+- .octa 0x0F0E0D0C0B0A09080706050403020100
+-
+-
+ .text
+
+
+@@ -257,6 +233,37 @@ aad_shift_arr:
+ pxor \TMP1, \GH # result is in TMP1
+ .endm
+
++# Reads DLEN bytes starting at DPTR and stores in XMMDst
++# where 0 < DLEN < 16
++# Clobbers %rax, DLEN and XMM1
++.macro READ_PARTIAL_BLOCK DPTR DLEN XMM1 XMMDst
++ cmp $8, \DLEN
++ jl _read_lt8_\@
++ mov (\DPTR), %rax
++ MOVQ_R64_XMM %rax, \XMMDst
++ sub $8, \DLEN
++ jz _done_read_partial_block_\@
++ xor %eax, %eax
++_read_next_byte_\@:
++ shl $8, %rax
++ mov 7(\DPTR, \DLEN, 1), %al
++ dec \DLEN
++ jnz _read_next_byte_\@
++ MOVQ_R64_XMM %rax, \XMM1
++ pslldq $8, \XMM1
++ por \XMM1, \XMMDst
++ jmp _done_read_partial_block_\@
++_read_lt8_\@:
++ xor %eax, %eax
++_read_next_byte_lt8_\@:
++ shl $8, %rax
++ mov -1(\DPTR, \DLEN, 1), %al
++ dec \DLEN
++ jnz _read_next_byte_lt8_\@
++ MOVQ_R64_XMM %rax, \XMMDst
++_done_read_partial_block_\@:
++.endm
++
+ /*
+ * if a = number of total plaintext bytes
+ * b = floor(a/16)
+@@ -273,62 +280,30 @@ aad_shift_arr:
+ XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
+ MOVADQ SHUF_MASK(%rip), %xmm14
+ mov arg7, %r10 # %r10 = AAD
+- mov arg8, %r12 # %r12 = aadLen
+- mov %r12, %r11
++ mov arg8, %r11 # %r11 = aadLen
+ pxor %xmm\i, %xmm\i
+ pxor \XMM2, \XMM2
+
+ cmp $16, %r11
+- jl _get_AAD_rest8\num_initial_blocks\operation
++ jl _get_AAD_rest\num_initial_blocks\operation
+ _get_AAD_blocks\num_initial_blocks\operation:
+ movdqu (%r10), %xmm\i
+ PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
+ pxor %xmm\i, \XMM2
+ GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+ add $16, %r10
+- sub $16, %r12
+ sub $16, %r11
+ cmp $16, %r11
+ jge _get_AAD_blocks\num_initial_blocks\operation
+
+ movdqu \XMM2, %xmm\i
++
++ /* read the last <16B of AAD */
++_get_AAD_rest\num_initial_blocks\operation:
+ cmp $0, %r11
+ je _get_AAD_done\num_initial_blocks\operation
+
+- pxor %xmm\i,%xmm\i
+-
+- /* read the last <16B of AAD. since we have at least 4B of
+- data right after the AAD (the ICV, and maybe some CT), we can
+- read 4B/8B blocks safely, and then get rid of the extra stuff */
+-_get_AAD_rest8\num_initial_blocks\operation:
+- cmp $4, %r11
+- jle _get_AAD_rest4\num_initial_blocks\operation
+- movq (%r10), \TMP1
+- add $8, %r10
+- sub $8, %r11
+- pslldq $8, \TMP1
+- psrldq $8, %xmm\i
+- pxor \TMP1, %xmm\i
+- jmp _get_AAD_rest8\num_initial_blocks\operation
+-_get_AAD_rest4\num_initial_blocks\operation:
+- cmp $0, %r11
+- jle _get_AAD_rest0\num_initial_blocks\operation
+- mov (%r10), %eax
+- movq %rax, \TMP1
+- add $4, %r10
+- sub $4, %r10
+- pslldq $12, \TMP1
+- psrldq $4, %xmm\i
+- pxor \TMP1, %xmm\i
+-_get_AAD_rest0\num_initial_blocks\operation:
+- /* finalize: shift out the extra bytes we read, and align
+- left. since pslldq can only shift by an immediate, we use
+- vpshufb and an array of shuffle masks */
+- movq %r12, %r11
+- salq $4, %r11
+- movdqu aad_shift_arr(%r11), \TMP1
+- PSHUFB_XMM \TMP1, %xmm\i
+-_get_AAD_rest_final\num_initial_blocks\operation:
++ READ_PARTIAL_BLOCK %r10, %r11, \TMP1, %xmm\i
+ PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
+ pxor \XMM2, %xmm\i
+ GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+@@ -532,62 +507,30 @@ _initial_blocks_done\num_initial_blocks\operation:
+ XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
+ MOVADQ SHUF_MASK(%rip), %xmm14
+ mov arg7, %r10 # %r10 = AAD
+- mov arg8, %r12 # %r12 = aadLen
+- mov %r12, %r11
++ mov arg8, %r11 # %r11 = aadLen
+ pxor %xmm\i, %xmm\i
+ pxor \XMM2, \XMM2
+
+ cmp $16, %r11
+- jl _get_AAD_rest8\num_initial_blocks\operation
++ jl _get_AAD_rest\num_initial_blocks\operation
+ _get_AAD_blocks\num_initial_blocks\operation:
+ movdqu (%r10), %xmm\i
+ PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
+ pxor %xmm\i, \XMM2
+ GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+ add $16, %r10
+- sub $16, %r12
+ sub $16, %r11
+ cmp $16, %r11
+ jge _get_AAD_blocks\num_initial_blocks\operation
+
+ movdqu \XMM2, %xmm\i
++
++ /* read the last <16B of AAD */
++_get_AAD_rest\num_initial_blocks\operation:
+ cmp $0, %r11
+ je _get_AAD_done\num_initial_blocks\operation
+
+- pxor %xmm\i,%xmm\i
+-
+- /* read the last <16B of AAD. since we have at least 4B of
+- data right after the AAD (the ICV, and maybe some PT), we can
+- read 4B/8B blocks safely, and then get rid of the extra stuff */
+-_get_AAD_rest8\num_initial_blocks\operation:
+- cmp $4, %r11
+- jle _get_AAD_rest4\num_initial_blocks\operation
+- movq (%r10), \TMP1
+- add $8, %r10
+- sub $8, %r11
+- pslldq $8, \TMP1
+- psrldq $8, %xmm\i
+- pxor \TMP1, %xmm\i
+- jmp _get_AAD_rest8\num_initial_blocks\operation
+-_get_AAD_rest4\num_initial_blocks\operation:
+- cmp $0, %r11
+- jle _get_AAD_rest0\num_initial_blocks\operation
+- mov (%r10), %eax
+- movq %rax, \TMP1
+- add $4, %r10
+- sub $4, %r10
+- pslldq $12, \TMP1
+- psrldq $4, %xmm\i
+- pxor \TMP1, %xmm\i
+-_get_AAD_rest0\num_initial_blocks\operation:
+- /* finalize: shift out the extra bytes we read, and align
+- left. since pslldq can only shift by an immediate, we use
+- vpshufb and an array of shuffle masks */
+- movq %r12, %r11
+- salq $4, %r11
+- movdqu aad_shift_arr(%r11), \TMP1
+- PSHUFB_XMM \TMP1, %xmm\i
+-_get_AAD_rest_final\num_initial_blocks\operation:
++ READ_PARTIAL_BLOCK %r10, %r11, \TMP1, %xmm\i
+ PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
+ pxor \XMM2, %xmm\i
+ GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+@@ -1386,14 +1329,6 @@ _esb_loop_\@:
+ *
+ * AAD Format with 64-bit Extended Sequence Number
+ *
+-* aadLen:
+-* from the definition of the spec, aadLen can only be 8 or 12 bytes.
+-* The code supports 16 too but for other sizes, the code will fail.
+-*
+-* TLen:
+-* from the definition of the spec, TLen can only be 8, 12 or 16 bytes.
+-* For other sizes, the code will fail.
+-*
+ * poly = x^128 + x^127 + x^126 + x^121 + 1
+ *
+ *****************************************************************************/
+@@ -1487,19 +1422,16 @@ _zero_cipher_left_decrypt:
+ PSHUFB_XMM %xmm10, %xmm0
+
+ ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Yn)
+- sub $16, %r11
+- add %r13, %r11
+- movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
+- lea SHIFT_MASK+16(%rip), %r12
+- sub %r13, %r12
+-# adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
+-# (%r13 is the number of bytes in plaintext mod 16)
+- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
+- PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes
+
++ lea (%arg3,%r11,1), %r10
++ mov %r13, %r12
++ READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1
++
++ lea ALL_F+16(%rip), %r12
++ sub %r13, %r12
+ movdqa %xmm1, %xmm2
+ pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
+- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
++ movdqu (%r12), %xmm1
+ # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
+ pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
+ pand %xmm1, %xmm2
+@@ -1508,9 +1440,6 @@ _zero_cipher_left_decrypt:
+
+ pxor %xmm2, %xmm8
+ GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
+- # GHASH computation for the last <16 byte block
+- sub %r13, %r11
+- add $16, %r11
+
+ # output %r13 bytes
+ MOVQ_R64_XMM %xmm0, %rax
+@@ -1664,14 +1593,6 @@ ENDPROC(aesni_gcm_dec)
+ *
+ * AAD Format with 64-bit Extended Sequence Number
+ *
+-* aadLen:
+-* from the definition of the spec, aadLen can only be 8 or 12 bytes.
+-* The code supports 16 too but for other sizes, the code will fail.
+-*
+-* TLen:
+-* from the definition of the spec, TLen can only be 8, 12 or 16 bytes.
+-* For other sizes, the code will fail.
+-*
+ * poly = x^128 + x^127 + x^126 + x^121 + 1
+ ***************************************************************************/
+ ENTRY(aesni_gcm_enc)
+@@ -1764,19 +1685,16 @@ _zero_cipher_left_encrypt:
+ movdqa SHUF_MASK(%rip), %xmm10
+ PSHUFB_XMM %xmm10, %xmm0
+
+-
+ ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # Encrypt(K, Yn)
+- sub $16, %r11
+- add %r13, %r11
+- movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
+- lea SHIFT_MASK+16(%rip), %r12
++
++ lea (%arg3,%r11,1), %r10
++ mov %r13, %r12
++ READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1
++
++ lea ALL_F+16(%rip), %r12
+ sub %r13, %r12
+- # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
+- # (%r13 is the number of bytes in plaintext mod 16)
+- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
+- PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
+ pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
+- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
++ movdqu (%r12), %xmm1
+ # get the appropriate mask to mask out top 16-r13 bytes of xmm0
+ pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
+ movdqa SHUF_MASK(%rip), %xmm10
+@@ -1785,9 +1703,6 @@ _zero_cipher_left_encrypt:
+ pxor %xmm0, %xmm8
+ GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
+ # GHASH computation for the last <16 byte block
+- sub %r13, %r11
+- add $16, %r11
+-
+ movdqa SHUF_MASK(%rip), %xmm10
+ PSHUFB_XMM %xmm10, %xmm0
+
+diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
+index 5c15d6b57329..c690ddc78c03 100644
+--- a/arch/x86/crypto/aesni-intel_glue.c
++++ b/arch/x86/crypto/aesni-intel_glue.c
+@@ -28,6 +28,7 @@
+ #include <crypto/cryptd.h>
+ #include <crypto/ctr.h>
+ #include <crypto/b128ops.h>
++#include <crypto/gcm.h>
+ #include <crypto/xts.h>
+ #include <asm/cpu_device_id.h>
+ #include <asm/fpu/api.h>
+@@ -689,8 +690,8 @@ static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
+ rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
+ }
+
+-static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
+- unsigned int key_len)
++static int gcmaes_wrapper_set_key(struct crypto_aead *parent, const u8 *key,
++ unsigned int key_len)
+ {
+ struct cryptd_aead **ctx = crypto_aead_ctx(parent);
+ struct cryptd_aead *cryptd_tfm = *ctx;
+@@ -715,8 +716,8 @@ static int common_rfc4106_set_authsize(struct crypto_aead *aead,
+
+ /* This is the Integrity Check Value (aka the authentication tag length and can
+ * be 8, 12 or 16 bytes long. */
+-static int rfc4106_set_authsize(struct crypto_aead *parent,
+- unsigned int authsize)
++static int gcmaes_wrapper_set_authsize(struct crypto_aead *parent,
++ unsigned int authsize)
+ {
+ struct cryptd_aead **ctx = crypto_aead_ctx(parent);
+ struct cryptd_aead *cryptd_tfm = *ctx;
+@@ -823,7 +824,7 @@ static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
+ if (sg_is_last(req->src) &&
+ (!PageHighMem(sg_page(req->src)) ||
+ req->src->offset + req->src->length <= PAGE_SIZE) &&
+- sg_is_last(req->dst) &&
++ sg_is_last(req->dst) && req->dst->length &&
+ (!PageHighMem(sg_page(req->dst)) ||
+ req->dst->offset + req->dst->length <= PAGE_SIZE)) {
+ one_entry_in_sg = 1;
+@@ -928,7 +929,7 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
+ aes_ctx);
+ }
+
+-static int rfc4106_encrypt(struct aead_request *req)
++static int gcmaes_wrapper_encrypt(struct aead_request *req)
+ {
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
+@@ -944,7 +945,7 @@ static int rfc4106_encrypt(struct aead_request *req)
+ return crypto_aead_encrypt(req);
+ }
+
+-static int rfc4106_decrypt(struct aead_request *req)
++static int gcmaes_wrapper_decrypt(struct aead_request *req)
+ {
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
+@@ -1115,7 +1116,7 @@ static int generic_gcmaes_decrypt(struct aead_request *req)
+ {
+ __be32 counter = cpu_to_be32(1);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+- struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
++ struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
+ void *aes_ctx = &(ctx->aes_key_expanded);
+ u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
+
+@@ -1126,12 +1127,36 @@ static int generic_gcmaes_decrypt(struct aead_request *req)
+ aes_ctx);
+ }
+
++static int generic_gcmaes_init(struct crypto_aead *aead)
++{
++ struct cryptd_aead *cryptd_tfm;
++ struct cryptd_aead **ctx = crypto_aead_ctx(aead);
++
++ cryptd_tfm = cryptd_alloc_aead("__driver-generic-gcm-aes-aesni",
++ CRYPTO_ALG_INTERNAL,
++ CRYPTO_ALG_INTERNAL);
++ if (IS_ERR(cryptd_tfm))
++ return PTR_ERR(cryptd_tfm);
++
++ *ctx = cryptd_tfm;
++ crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
++
++ return 0;
++}
++
++static void generic_gcmaes_exit(struct crypto_aead *aead)
++{
++ struct cryptd_aead **ctx = crypto_aead_ctx(aead);
++
++ cryptd_free_aead(*ctx);
++}
++
+ static struct aead_alg aesni_aead_algs[] = { {
+ .setkey = common_rfc4106_set_key,
+ .setauthsize = common_rfc4106_set_authsize,
+ .encrypt = helper_rfc4106_encrypt,
+ .decrypt = helper_rfc4106_decrypt,
+- .ivsize = 8,
++ .ivsize = GCM_RFC4106_IV_SIZE,
+ .maxauthsize = 16,
+ .base = {
+ .cra_name = "__gcm-aes-aesni",
+@@ -1145,11 +1170,11 @@ static struct aead_alg aesni_aead_algs[] = { {
+ }, {
+ .init = rfc4106_init,
+ .exit = rfc4106_exit,
+- .setkey = rfc4106_set_key,
+- .setauthsize = rfc4106_set_authsize,
+- .encrypt = rfc4106_encrypt,
+- .decrypt = rfc4106_decrypt,
+- .ivsize = 8,
++ .setkey = gcmaes_wrapper_set_key,
++ .setauthsize = gcmaes_wrapper_set_authsize,
++ .encrypt = gcmaes_wrapper_encrypt,
++ .decrypt = gcmaes_wrapper_decrypt,
++ .ivsize = GCM_RFC4106_IV_SIZE,
+ .maxauthsize = 16,
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+@@ -1165,7 +1190,26 @@ static struct aead_alg aesni_aead_algs[] = { {
+ .setauthsize = generic_gcmaes_set_authsize,
+ .encrypt = generic_gcmaes_encrypt,
+ .decrypt = generic_gcmaes_decrypt,
+- .ivsize = 12,
++ .ivsize = GCM_AES_IV_SIZE,
++ .maxauthsize = 16,
++ .base = {
++ .cra_name = "__generic-gcm-aes-aesni",
++ .cra_driver_name = "__driver-generic-gcm-aes-aesni",
++ .cra_priority = 0,
++ .cra_flags = CRYPTO_ALG_INTERNAL,
++ .cra_blocksize = 1,
++ .cra_ctxsize = sizeof(struct generic_gcmaes_ctx),
++ .cra_alignmask = AESNI_ALIGN - 1,
++ .cra_module = THIS_MODULE,
++ },
++}, {
++ .init = generic_gcmaes_init,
++ .exit = generic_gcmaes_exit,
++ .setkey = gcmaes_wrapper_set_key,
++ .setauthsize = gcmaes_wrapper_set_authsize,
++ .encrypt = gcmaes_wrapper_encrypt,
++ .decrypt = gcmaes_wrapper_decrypt,
++ .ivsize = GCM_AES_IV_SIZE,
+ .maxauthsize = 16,
+ .base = {
+ .cra_name = "gcm(aes)",
+@@ -1173,8 +1217,7 @@ static struct aead_alg aesni_aead_algs[] = { {
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+- .cra_ctxsize = sizeof(struct generic_gcmaes_ctx),
+- .cra_alignmask = AESNI_ALIGN - 1,
++ .cra_ctxsize = sizeof(struct cryptd_aead *),
+ .cra_module = THIS_MODULE,
+ },
+ } };
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index eb38ac9d9a31..4f8b80199672 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1156,7 +1156,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
+ static inline int emulate_instruction(struct kvm_vcpu *vcpu,
+ int emulation_type)
+ {
+- return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
++ return x86_emulate_instruction(vcpu, 0,
++ emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0);
+ }
+
+ void kvm_enable_efer_bits(u64);
+diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
+index b20f9d623f9c..8f09012b92e7 100644
+--- a/arch/x86/include/asm/segment.h
++++ b/arch/x86/include/asm/segment.h
+@@ -236,11 +236,23 @@
+ */
+ #define EARLY_IDT_HANDLER_SIZE 9
+
++/*
++ * xen_early_idt_handler_array is for Xen pv guests: for each entry in
++ * early_idt_handler_array it contains a prequel in the form of
++ * pop %rcx; pop %r11; jmp early_idt_handler_array[i]; summing up to
++ * max 8 bytes.
++ */
++#define XEN_EARLY_IDT_HANDLER_SIZE 8
++
+ #ifndef __ASSEMBLY__
+
+ extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
+ extern void early_ignore_irq(void);
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_XEN_PV)
++extern const char xen_early_idt_handler_array[NUM_EXCEPTION_VECTORS][XEN_EARLY_IDT_HANDLER_SIZE];
++#endif
++
+ /*
+ * Load a segment. Fall back on loading the zero segment if something goes
+ * wrong. This variant assumes that loading zero fully clears the segment.
+diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
+index cdc70a3a6583..c2cea6651279 100644
+--- a/arch/x86/kvm/cpuid.h
++++ b/arch/x86/kvm/cpuid.h
+@@ -44,7 +44,7 @@ static const struct cpuid_reg reverse_cpuid[] = {
+ [CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
+ [CPUID_1_ECX] = { 1, 0, CPUID_ECX},
+ [CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
+- [CPUID_8000_0001_ECX] = {0xc0000001, 0, CPUID_ECX},
++ [CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
+ [CPUID_7_0_EBX] = { 7, 0, CPUID_EBX},
+ [CPUID_D_1_EAX] = { 0xd, 1, CPUID_EAX},
+ [CPUID_F_0_EDX] = { 0xf, 0, CPUID_EDX},
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 7bbb5da2b49d..eca6a89f2326 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -4023,6 +4023,26 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt)
+ fxstate_size(ctxt));
+ }
+
++/*
++ * FXRSTOR might restore XMM registers not provided by the guest. Fill
++ * in the host registers (via FXSAVE) instead, so they won't be modified.
++ * (preemption has to stay disabled until FXRSTOR).
++ *
++ * Use noinline to keep the stack for other functions called by callers small.
++ */
++static noinline int fxregs_fixup(struct fxregs_state *fx_state,
++ const size_t used_size)
++{
++ struct fxregs_state fx_tmp;
++ int rc;
++
++ rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
++ memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
++ __fxstate_size(16) - used_size);
++
++ return rc;
++}
++
+ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
+ {
+ struct fxregs_state fx_state;
+@@ -4033,19 +4053,19 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
++ size = fxstate_size(ctxt);
++ rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
++ if (rc != X86EMUL_CONTINUE)
++ return rc;
++
+ ctxt->ops->get_fpu(ctxt);
+
+- size = fxstate_size(ctxt);
+ if (size < __fxstate_size(16)) {
+- rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
++ rc = fxregs_fixup(&fx_state, size);
+ if (rc != X86EMUL_CONTINUE)
+ goto out;
+ }
+
+- rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
+- if (rc != X86EMUL_CONTINUE)
+- goto out;
+-
+ if (fx_state.mxcsr >> 16) {
+ rc = emulate_gp(ctxt, 0);
+ goto out;
+@@ -5009,6 +5029,8 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
+ bool op_prefix = false;
+ bool has_seg_override = false;
+ struct opcode opcode;
++ u16 dummy;
++ struct desc_struct desc;
+
+ ctxt->memop.type = OP_NONE;
+ ctxt->memopp = NULL;
+@@ -5027,6 +5049,11 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
+ switch (mode) {
+ case X86EMUL_MODE_REAL:
+ case X86EMUL_MODE_VM86:
++ def_op_bytes = def_ad_bytes = 2;
++ ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
++ if (desc.d)
++ def_op_bytes = def_ad_bytes = 4;
++ break;
+ case X86EMUL_MODE_PROT16:
+ def_op_bytes = def_ad_bytes = 2;
+ break;
+diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
+index bdff437acbcb..9d270ba9643c 100644
+--- a/arch/x86/kvm/ioapic.c
++++ b/arch/x86/kvm/ioapic.c
+@@ -257,8 +257,7 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors)
+ index == RTC_GSI) {
+ if (kvm_apic_match_dest(vcpu, NULL, 0,
+ e->fields.dest_id, e->fields.dest_mode) ||
+- (e->fields.trig_mode == IOAPIC_EDGE_TRIG &&
+- kvm_apic_pending_eoi(vcpu, e->fields.vector)))
++ kvm_apic_pending_eoi(vcpu, e->fields.vector))
+ __set_bit(e->fields.vector,
+ ioapic_handled_vectors);
+ }
+@@ -277,6 +276,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
+ {
+ unsigned index;
+ bool mask_before, mask_after;
++ int old_remote_irr, old_delivery_status;
+ union kvm_ioapic_redirect_entry *e;
+
+ switch (ioapic->ioregsel) {
+@@ -299,14 +299,28 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
+ return;
+ e = &ioapic->redirtbl[index];
+ mask_before = e->fields.mask;
++ /* Preserve read-only fields */
++ old_remote_irr = e->fields.remote_irr;
++ old_delivery_status = e->fields.delivery_status;
+ if (ioapic->ioregsel & 1) {
+ e->bits &= 0xffffffff;
+ e->bits |= (u64) val << 32;
+ } else {
+ e->bits &= ~0xffffffffULL;
+ e->bits |= (u32) val;
+- e->fields.remote_irr = 0;
+ }
++ e->fields.remote_irr = old_remote_irr;
++ e->fields.delivery_status = old_delivery_status;
++
++ /*
++ * Some OSes (Linux, Xen) assume that Remote IRR bit will
++ * be cleared by IOAPIC hardware when the entry is configured
++ * as edge-triggered. This behavior is used to simulate an
++ * explicit EOI on IOAPICs that don't have the EOI register.
++ */
++ if (e->fields.trig_mode == IOAPIC_EDGE_TRIG)
++ e->fields.remote_irr = 0;
++
+ mask_after = e->fields.mask;
+ if (mask_before != mask_after)
+ kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index ef16cf0f7cfd..a45063a9219c 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -5606,7 +5606,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
+ vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
+ }
+
+- vmcs_writel(GUEST_RFLAGS, 0x02);
++ kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
+ kvm_rip_write(vcpu, 0xfff0);
+
+ vmcs_writel(GUEST_GDTR_BASE, 0);
+@@ -5919,10 +5919,6 @@ static int handle_exception(struct kvm_vcpu *vcpu)
+ return 1; /* already handled by vmx_vcpu_run() */
+
+ if (is_invalid_opcode(intr_info)) {
+- if (is_guest_mode(vcpu)) {
+- kvm_queue_exception(vcpu, UD_VECTOR);
+- return 1;
+- }
+ er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
+ if (er == EMULATE_USER_EXIT)
+ return 0;
+@@ -6608,7 +6604,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
+ if (kvm_test_request(KVM_REQ_EVENT, vcpu))
+ return 1;
+
+- err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE);
++ err = emulate_instruction(vcpu, 0);
+
+ if (err == EMULATE_USER_EXIT) {
+ ++vcpu->stat.mmio_exits;
+@@ -11115,13 +11111,12 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
+ {
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ unsigned long exit_qual;
+-
+- if (kvm_event_needs_reinjection(vcpu))
+- return -EBUSY;
++ bool block_nested_events =
++ vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
+
+ if (vcpu->arch.exception.pending &&
+ nested_vmx_check_exception(vcpu, &exit_qual)) {
+- if (vmx->nested.nested_run_pending)
++ if (block_nested_events)
+ return -EBUSY;
+ nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
+ vcpu->arch.exception.pending = false;
+@@ -11130,14 +11125,14 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
+
+ if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
+ vmx->nested.preemption_timer_expired) {
+- if (vmx->nested.nested_run_pending)
++ if (block_nested_events)
+ return -EBUSY;
+ nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
+ return 0;
+ }
+
+ if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
+- if (vmx->nested.nested_run_pending)
++ if (block_nested_events)
+ return -EBUSY;
+ nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
+ NMI_VECTOR | INTR_TYPE_NMI_INTR |
+@@ -11153,7 +11148,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
+
+ if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
+ nested_exit_on_intr(vcpu)) {
+- if (vmx->nested.nested_run_pending)
++ if (block_nested_events)
+ return -EBUSY;
+ nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
+ return 0;
+@@ -11340,6 +11335,24 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
+ kvm_clear_interrupt_queue(vcpu);
+ }
+
++static void load_vmcs12_mmu_host_state(struct kvm_vcpu *vcpu,
++ struct vmcs12 *vmcs12)
++{
++ u32 entry_failure_code;
++
++ nested_ept_uninit_mmu_context(vcpu);
++
++ /*
++ * Only PDPTE load can fail as the value of cr3 was checked on entry and
++ * couldn't have changed.
++ */
++ if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
++ nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
++
++ if (!enable_ept)
++ vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
++}
++
+ /*
+ * A part of what we need to when the nested L2 guest exits and we want to
+ * run its L1 parent, is to reset L1's guest state to the host state specified
+@@ -11353,7 +11366,6 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+ {
+ struct kvm_segment seg;
+- u32 entry_failure_code;
+
+ if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
+ vcpu->arch.efer = vmcs12->host_ia32_efer;
+@@ -11380,17 +11392,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
+ vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
+ vmx_set_cr4(vcpu, vmcs12->host_cr4);
+
+- nested_ept_uninit_mmu_context(vcpu);
+-
+- /*
+- * Only PDPTE load can fail as the value of cr3 was checked on entry and
+- * couldn't have changed.
+- */
+- if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
+- nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
+-
+- if (!enable_ept)
+- vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
++ load_vmcs12_mmu_host_state(vcpu, vmcs12);
+
+ if (enable_vpid) {
+ /*
+@@ -11616,6 +11618,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
+ * accordingly.
+ */
+ nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
++
++ load_vmcs12_mmu_host_state(vcpu, vmcs12);
++
+ /*
+ * The emulated instruction was already skipped in
+ * nested_vmx_run, but the updated RIP was never
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 575c8953cc9a..8c28023a43b1 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1795,10 +1795,13 @@ u64 get_kvmclock_ns(struct kvm *kvm)
+ /* both __this_cpu_read() and rdtsc() should be on the same cpu */
+ get_cpu();
+
+- kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
+- &hv_clock.tsc_shift,
+- &hv_clock.tsc_to_system_mul);
+- ret = __pvclock_read_cycles(&hv_clock, rdtsc());
++ if (__this_cpu_read(cpu_tsc_khz)) {
++ kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
++ &hv_clock.tsc_shift,
++ &hv_clock.tsc_to_system_mul);
++ ret = __pvclock_read_cycles(&hv_clock, rdtsc());
++ } else
++ ret = ktime_get_boot_ns() + ka->kvmclock_offset;
+
+ put_cpu();
+
+@@ -5416,7 +5419,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+ vcpu->run->internal.ndata = 0;
+- r = EMULATE_FAIL;
++ r = EMULATE_USER_EXIT;
+ }
+ kvm_queue_exception(vcpu, UD_VECTOR);
+
+@@ -7242,12 +7245,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+ {
+ struct fpu *fpu = &current->thread.fpu;
+ int r;
+- sigset_t sigsaved;
+
+ fpu__initialize(fpu);
+
+- if (vcpu->sigset_active)
+- sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
++ kvm_sigset_activate(vcpu);
+
+ if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
+ if (kvm_run->immediate_exit) {
+@@ -7290,8 +7291,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+
+ out:
+ post_kvm_run_save(vcpu);
+- if (vcpu->sigset_active)
+- sigprocmask(SIG_SETMASK, &sigsaved, NULL);
++ kvm_sigset_deactivate(vcpu);
+
+ return r;
+ }
+diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
+index 30bc4812ceb8..9fe656c42aa5 100644
+--- a/arch/x86/mm/extable.c
++++ b/arch/x86/mm/extable.c
+@@ -1,6 +1,7 @@
+ #include <linux/extable.h>
+ #include <linux/uaccess.h>
+ #include <linux/sched/debug.h>
++#include <xen/xen.h>
+
+ #include <asm/fpu/internal.h>
+ #include <asm/traps.h>
+@@ -212,8 +213,9 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
+ * Old CPUs leave the high bits of CS on the stack
+ * undefined. I'm not sure which CPUs do this, but at least
+ * the 486 DX works this way.
++ * Xen pv domains are not using the default __KERNEL_CS.
+ */
+- if (regs->cs != __KERNEL_CS)
++ if (!xen_pv_domain() && regs->cs != __KERNEL_CS)
+ goto fail;
+
+ /*
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index ae3a071e1d0f..899a22a02e95 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -622,7 +622,7 @@ static struct trap_array_entry trap_array[] = {
+ { simd_coprocessor_error, xen_simd_coprocessor_error, false },
+ };
+
+-static bool get_trap_addr(void **addr, unsigned int ist)
++static bool __ref get_trap_addr(void **addr, unsigned int ist)
+ {
+ unsigned int nr;
+ bool ist_okay = false;
+@@ -644,6 +644,14 @@ static bool get_trap_addr(void **addr, unsigned int ist)
+ }
+ }
+
++ if (nr == ARRAY_SIZE(trap_array) &&
++ *addr >= (void *)early_idt_handler_array[0] &&
++ *addr < (void *)early_idt_handler_array[NUM_EXCEPTION_VECTORS]) {
++ nr = (*addr - (void *)early_idt_handler_array[0]) /
++ EARLY_IDT_HANDLER_SIZE;
++ *addr = (void *)xen_early_idt_handler_array[nr];
++ }
++
+ if (WARN_ON(ist != 0 && !ist_okay))
+ return false;
+
+@@ -1261,6 +1269,21 @@ asmlinkage __visible void __init xen_start_kernel(void)
+ xen_setup_gdt(0);
+
+ xen_init_irq_ops();
++
++ /* Let's presume PV guests always boot on vCPU with id 0. */
++ per_cpu(xen_vcpu_id, 0) = 0;
++
++ /*
++ * Setup xen_vcpu early because idt_setup_early_handler needs it for
++ * local_irq_disable(), irqs_disabled().
++ *
++ * Don't do the full vcpu_info placement stuff until we have
++ * the cpu_possible_mask and a non-dummy shared_info.
++ */
++ xen_vcpu_info_reset(0);
++
++ idt_setup_early_handler();
++
+ xen_init_capabilities();
+
+ #ifdef CONFIG_X86_LOCAL_APIC
+@@ -1294,18 +1317,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
+ */
+ acpi_numa = -1;
+ #endif
+- /* Let's presume PV guests always boot on vCPU with id 0. */
+- per_cpu(xen_vcpu_id, 0) = 0;
+-
+- /*
+- * Setup xen_vcpu early because start_kernel needs it for
+- * local_irq_disable(), irqs_disabled().
+- *
+- * Don't do the full vcpu_info placement stuff until we have
+- * the cpu_possible_mask and a non-dummy shared_info.
+- */
+- xen_vcpu_info_reset(0);
+-
+ WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv));
+
+ local_irq_disable();
+diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
+index 8a10c9a9e2b5..417b339e5c8e 100644
+--- a/arch/x86/xen/xen-asm_64.S
++++ b/arch/x86/xen/xen-asm_64.S
+@@ -15,6 +15,7 @@
+
+ #include <xen/interface/xen.h>
+
++#include <linux/init.h>
+ #include <linux/linkage.h>
+
+ .macro xen_pv_trap name
+@@ -54,6 +55,19 @@ xen_pv_trap entry_INT80_compat
+ #endif
+ xen_pv_trap hypervisor_callback
+
++ __INIT
++ENTRY(xen_early_idt_handler_array)
++ i = 0
++ .rept NUM_EXCEPTION_VECTORS
++ pop %rcx
++ pop %r11
++ jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
++ i = i + 1
++ .fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
++ .endr
++END(xen_early_idt_handler_array)
++ __FINIT
++
+ hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
+ /*
+ * Xen64 iret frame:
+diff --git a/crypto/Kconfig b/crypto/Kconfig
+index ac5fb37e6f4b..42212b60a0ee 100644
+--- a/crypto/Kconfig
++++ b/crypto/Kconfig
+@@ -130,7 +130,7 @@ config CRYPTO_DH
+
+ config CRYPTO_ECDH
+ tristate "ECDH algorithm"
+- select CRYTPO_KPP
++ select CRYPTO_KPP
+ select CRYPTO_RNG_DEFAULT
+ help
+ Generic implementation of the ECDH algorithm
+diff --git a/crypto/af_alg.c b/crypto/af_alg.c
+index 6ec360213107..53b7fa4cf4ab 100644
+--- a/crypto/af_alg.c
++++ b/crypto/af_alg.c
+@@ -150,7 +150,7 @@ EXPORT_SYMBOL_GPL(af_alg_release_parent);
+
+ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ {
+- const u32 forbidden = CRYPTO_ALG_INTERNAL;
++ const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY;
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct sockaddr_alg *sa = (void *)uaddr;
+@@ -158,6 +158,10 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ void *private;
+ int err;
+
++ /* If caller uses non-allowed flag, return error. */
++ if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
++ return -EINVAL;
++
+ if (sock->state == SS_CONNECTED)
+ return -EINVAL;
+
+@@ -176,9 +180,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ if (IS_ERR(type))
+ return PTR_ERR(type);
+
+- private = type->bind(sa->salg_name,
+- sa->salg_feat & ~forbidden,
+- sa->salg_mask & ~forbidden);
++ private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask);
+ if (IS_ERR(private)) {
+ module_put(type->owner);
+ return PTR_ERR(private);
+diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
+index 7e8ed96236ce..a68be626017c 100644
+--- a/crypto/sha3_generic.c
++++ b/crypto/sha3_generic.c
+@@ -18,6 +18,7 @@
+ #include <linux/types.h>
+ #include <crypto/sha3.h>
+ #include <asm/byteorder.h>
++#include <asm/unaligned.h>
+
+ #define KECCAK_ROUNDS 24
+
+@@ -149,7 +150,7 @@ static int sha3_update(struct shash_desc *desc, const u8 *data,
+ unsigned int i;
+
+ for (i = 0; i < sctx->rsizw; i++)
+- sctx->st[i] ^= ((u64 *) src)[i];
++ sctx->st[i] ^= get_unaligned_le64(src + 8 * i);
+ keccakf(sctx->st);
+
+ done += sctx->rsiz;
+@@ -174,7 +175,7 @@ static int sha3_final(struct shash_desc *desc, u8 *out)
+ sctx->buf[sctx->rsiz - 1] |= 0x80;
+
+ for (i = 0; i < sctx->rsizw; i++)
+- sctx->st[i] ^= ((u64 *) sctx->buf)[i];
++ sctx->st[i] ^= get_unaligned_le64(sctx->buf + 8 * i);
+
+ keccakf(sctx->st);
+
+diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
+index 24418932612e..a041689e5701 100644
+--- a/drivers/acpi/device_sysfs.c
++++ b/drivers/acpi/device_sysfs.c
+@@ -146,6 +146,10 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias,
+ int count;
+ struct acpi_hardware_id *id;
+
++ /* Avoid unnecessarily loading modules for non present devices. */
++ if (!acpi_device_is_present(acpi_dev))
++ return 0;
++
+ /*
+ * Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should
+ * be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index a340766b51fe..2ef8bd29e188 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -4302,6 +4302,18 @@ static int binder_thread_release(struct binder_proc *proc,
+ if (t)
+ spin_lock(&t->lock);
+ }
++
++ /*
++ * If this thread used poll, make sure we remove the waitqueue
++ * from any epoll data structures holding it with POLLFREE.
++ * waitqueue_active() is safe to use here because we're holding
++ * the inner lock.
++ */
++ if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
++ waitqueue_active(&thread->wait)) {
++ wake_up_poll(&thread->wait, POLLHUP | POLLFREE);
++ }
++
+ binder_inner_proc_unlock(thread->proc);
+
+ if (send_reply)
+diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
+index c2819a3d58a6..6cb148268676 100644
+--- a/drivers/android/binder_alloc.c
++++ b/drivers/android/binder_alloc.c
+@@ -668,7 +668,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
+ goto err_already_mapped;
+ }
+
+- area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
++ area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC);
+ if (area == NULL) {
+ ret = -ENOMEM;
+ failure_string = "get_vm_area";
+diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
+index d7d21118d3e0..2c2ed9cf8796 100644
+--- a/drivers/auxdisplay/Kconfig
++++ b/drivers/auxdisplay/Kconfig
+@@ -136,6 +136,7 @@ config CFAG12864B_RATE
+
+ config IMG_ASCII_LCD
+ tristate "Imagination Technologies ASCII LCD Display"
++ depends on HAS_IOMEM
+ default y if MIPS_MALTA || MIPS_SEAD3
+ select SYSCON
+ help
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 85de67334695..a2a0dce5114e 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1576,9 +1576,8 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
+ return err;
+ }
+
+-static void lo_release(struct gendisk *disk, fmode_t mode)
++static void __lo_release(struct loop_device *lo)
+ {
+- struct loop_device *lo = disk->private_data;
+ int err;
+
+ if (atomic_dec_return(&lo->lo_refcnt))
+@@ -1605,6 +1604,13 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
+ mutex_unlock(&lo->lo_ctl_mutex);
+ }
+
++static void lo_release(struct gendisk *disk, fmode_t mode)
++{
++ mutex_lock(&loop_index_mutex);
++ __lo_release(disk->private_data);
++ mutex_unlock(&loop_index_mutex);
++}
++
+ static const struct block_device_operations lo_fops = {
+ .owner = THIS_MODULE,
+ .open = lo_open,
+diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
+index 4d55af5c6e5b..69dfa1d3f453 100644
+--- a/drivers/block/null_blk.c
++++ b/drivers/block/null_blk.c
+@@ -467,7 +467,6 @@ static void nullb_device_release(struct config_item *item)
+ {
+ struct nullb_device *dev = to_nullb_device(item);
+
+- badblocks_exit(&dev->badblocks);
+ null_free_device_storage(dev, false);
+ null_free_dev(dev);
+ }
+@@ -578,6 +577,10 @@ static struct nullb_device *null_alloc_dev(void)
+
+ static void null_free_dev(struct nullb_device *dev)
+ {
++ if (!dev)
++ return;
++
++ badblocks_exit(&dev->badblocks);
+ kfree(dev);
+ }
+
+diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
+index 4ebae43118ef..d8addbce40bc 100644
+--- a/drivers/cpufreq/Kconfig
++++ b/drivers/cpufreq/Kconfig
+@@ -275,6 +275,7 @@ config BMIPS_CPUFREQ
+
+ config LOONGSON2_CPUFREQ
+ tristate "Loongson2 CPUFreq Driver"
++ depends on LEMOTE_MACH2F
+ help
+ This option adds a CPUFreq driver for loongson processors which
+ support software configurable cpu frequency.
+@@ -287,6 +288,7 @@ config LOONGSON2_CPUFREQ
+
+ config LOONGSON1_CPUFREQ
+ tristate "Loongson1 CPUFreq Driver"
++ depends on LOONGSON1_LS1B
+ help
+ This option adds a CPUFreq driver for loongson1 processors which
+ support software configurable cpu frequency.
+diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
+index 3980f946874f..0626b33d2886 100644
+--- a/drivers/crypto/inside-secure/safexcel_hash.c
++++ b/drivers/crypto/inside-secure/safexcel_hash.c
+@@ -33,6 +33,8 @@ struct safexcel_ahash_req {
+ bool finish;
+ bool hmac;
+
++ int nents;
++
+ u8 state_sz; /* expected sate size, only set once */
+ u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
+
+@@ -151,8 +153,10 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+ result_sz = crypto_ahash_digestsize(ahash);
+ memcpy(sreq->state, areq->result, result_sz);
+
+- dma_unmap_sg(priv->dev, areq->src,
+- sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);
++ if (sreq->nents) {
++ dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
++ sreq->nents = 0;
++ }
+
+ safexcel_free_context(priv, async, sreq->state_sz);
+
+@@ -177,7 +181,7 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
+ struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
+ struct safexcel_result_desc *rdesc;
+ struct scatterlist *sg;
+- int i, nents, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
++ int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
+
+ queued = len = req->len - req->processed;
+ if (queued < crypto_ahash_blocksize(ahash))
+@@ -185,17 +189,31 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
+ else
+ cache_len = queued - areq->nbytes;
+
+- /*
+- * If this is not the last request and the queued data does not fit
+- * into full blocks, cache it for the next send() call.
+- */
+- extra = queued & (crypto_ahash_blocksize(ahash) - 1);
+- if (!req->last_req && extra) {
+- sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+- req->cache_next, extra, areq->nbytes - extra);
+-
+- queued -= extra;
+- len -= extra;
++ if (!req->last_req) {
++ /* If this is not the last request and the queued data does not
++ * fit into full blocks, cache it for the next send() call.
++ */
++ extra = queued & (crypto_ahash_blocksize(ahash) - 1);
++ if (!extra)
++ /* If this is not the last request and the queued data
++ * is a multiple of a block, cache the last one for now.
++ */
++ extra = queued - crypto_ahash_blocksize(ahash);
++
++ if (extra) {
++ sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
++ req->cache_next, extra,
++ areq->nbytes - extra);
++
++ queued -= extra;
++ len -= extra;
++
++ if (!queued) {
++ *commands = 0;
++ *results = 0;
++ return 0;
++ }
++ }
+ }
+
+ spin_lock_bh(&priv->ring[ring].egress_lock);
+@@ -233,15 +251,15 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
+ }
+
+ /* Now handle the current ahash request buffer(s) */
+- nents = dma_map_sg(priv->dev, areq->src,
+- sg_nents_for_len(areq->src, areq->nbytes),
+- DMA_TO_DEVICE);
+- if (!nents) {
++ req->nents = dma_map_sg(priv->dev, areq->src,
++ sg_nents_for_len(areq->src, areq->nbytes),
++ DMA_TO_DEVICE);
++ if (!req->nents) {
+ ret = -ENOMEM;
+ goto cdesc_rollback;
+ }
+
+- for_each_sg(areq->src, sg, nents, i) {
++ for_each_sg(areq->src, sg, req->nents, i) {
+ int sglen = sg_dma_len(sg);
+
+ /* Do not overflow the request */
+diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
+index 2b4c39fdfa91..86210f75d233 100644
+--- a/drivers/firmware/efi/Kconfig
++++ b/drivers/firmware/efi/Kconfig
+@@ -159,7 +159,10 @@ config RESET_ATTACK_MITIGATION
+ using the TCG Platform Reset Attack Mitigation specification. This
+ protects against an attacker forcibly rebooting the system while it
+ still contains secrets in RAM, booting another OS and extracting the
+- secrets.
++ secrets. This should only be enabled when userland is configured to
++ clear the MemoryOverwriteRequest flag on clean shutdown after secrets
++ have been evicted, since otherwise it will trigger even on clean
++ reboots.
+
+ endmenu
+
+diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
+index f33d4a5fe671..af0baf8da295 100644
+--- a/drivers/gpio/gpio-ath79.c
++++ b/drivers/gpio/gpio-ath79.c
+@@ -323,3 +323,6 @@ static struct platform_driver ath79_gpio_driver = {
+ };
+
+ module_platform_driver(ath79_gpio_driver);
++
++MODULE_DESCRIPTION("Atheros AR71XX/AR724X/AR913X GPIO API support");
++MODULE_LICENSE("GPL v2");
+diff --git a/drivers/gpio/gpio-iop.c b/drivers/gpio/gpio-iop.c
+index 98c7ff2a76e7..8d62db447ec1 100644
+--- a/drivers/gpio/gpio-iop.c
++++ b/drivers/gpio/gpio-iop.c
+@@ -58,3 +58,7 @@ static int __init iop3xx_gpio_init(void)
+ return platform_driver_register(&iop3xx_gpio_driver);
+ }
+ arch_initcall(iop3xx_gpio_init);
++
++MODULE_DESCRIPTION("GPIO handling for Intel IOP3xx processors");
++MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
+index 16cbc5702865..491b0974c0fe 100644
+--- a/drivers/gpio/gpio-stmpe.c
++++ b/drivers/gpio/gpio-stmpe.c
+@@ -190,6 +190,16 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
+ };
+ int i, j;
+
++ /*
++ * STMPE1600: to be able to get IRQ from pins,
++ * a read must be done on GPMR register, or a write in
++ * GPSR or GPCR registers
++ */
++ if (stmpe->partnum == STMPE1600) {
++ stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_LSB]);
++ stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_CSB]);
++ }
++
+ for (i = 0; i < CACHE_NR_REGS; i++) {
+ /* STMPE801 and STMPE1600 don't have RE and FE registers */
+ if ((stmpe->partnum == STMPE801 ||
+@@ -227,21 +237,11 @@ static void stmpe_gpio_irq_unmask(struct irq_data *d)
+ {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(gc);
+- struct stmpe *stmpe = stmpe_gpio->stmpe;
+ int offset = d->hwirq;
+ int regoffset = offset / 8;
+ int mask = BIT(offset % 8);
+
+ stmpe_gpio->regs[REG_IE][regoffset] |= mask;
+-
+- /*
+- * STMPE1600 workaround: to be able to get IRQ from pins,
+- * a read must be done on GPMR register, or a write in
+- * GPSR or GPCR registers
+- */
+- if (stmpe->partnum == STMPE1600)
+- stmpe_reg_read(stmpe,
+- stmpe->regs[STMPE_IDX_GPMR_LSB + regoffset]);
+ }
+
+ static void stmpe_dbg_show_one(struct seq_file *s,
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index eb80dac4e26a..bdd68ff197dc 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -723,6 +723,9 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
+ struct gpioevent_data ge;
+ int ret, level;
+
++ /* Do not leak kernel stack to userspace */
++ memset(&ge, 0, sizeof(ge));
++
+ ge.timestamp = ktime_get_real_ns();
+ level = gpiod_get_value_cansleep(le->desc);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+index b9dbbf9cb8b0..bdabaa3399db 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+@@ -369,29 +369,50 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
+ {
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct cik_sdma_rlc_registers *m;
++ unsigned long end_jiffies;
+ uint32_t sdma_base_addr;
++ uint32_t data;
+
+ m = get_sdma_mqd(mqd);
+ sdma_base_addr = get_sdma_base_addr(m);
+
+- WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
+- m->sdma_rlc_virtual_addr);
++ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
++ m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
+
+- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE,
+- m->sdma_rlc_rb_base);
++ end_jiffies = msecs_to_jiffies(2000) + jiffies;
++ while (true) {
++ data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
++ if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
++ break;
++ if (time_after(jiffies, end_jiffies))
++ return -ETIME;
++ usleep_range(500, 1000);
++ }
++ if (m->sdma_engine_id) {
++ data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
++ data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
++ RESUME_CTX, 0);
++ WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
++ } else {
++ data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
++ data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
++ RESUME_CTX, 0);
++ WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
++ }
+
++ WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
++ m->sdma_rlc_doorbell);
++ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
++ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
++ WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
++ m->sdma_rlc_virtual_addr);
++ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+ m->sdma_rlc_rb_base_hi);
+-
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ m->sdma_rlc_rb_rptr_addr_lo);
+-
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ m->sdma_rlc_rb_rptr_addr_hi);
+-
+- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
+- m->sdma_rlc_doorbell);
+-
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ m->sdma_rlc_rb_cntl);
+
+@@ -564,9 +585,9 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+ }
+
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
+- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
+- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
+- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
++ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
++ RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
++ SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 60d8bedb694d..b5aa8e6f8e0b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -403,6 +403,10 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
+ if (candidate->robj == validated)
+ break;
+
++ /* We can't move pinned BOs here */
++ if (bo->pin_count)
++ continue;
++
+ other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+
+ /* Check if this BO is in one of the domains we need space for */
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+index 44ffd23348fc..164fa4b1f9a9 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+@@ -205,8 +205,8 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
+ struct cik_sdma_rlc_registers *m;
+
+ m = get_sdma_mqd(mqd);
+- m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) <<
+- SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
++ m->sdma_rlc_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
++ << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
+ q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
+ 1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
+ 6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
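+
+The "- 1" in the hunk above matters because the RB_SIZE field encodes the
+ring size as the log2 of its dword count, while ffs() returns a 1-based bit
+index, so for a power-of-two count ffs(n) == log2(n) + 1. A worked example
+with a hypothetical 4 KiB queue:
+
+	#include <linux/bitops.h>	/* ffs() */
+
+	static void rb_size_example(void)
+	{
+		unsigned int dwords = 4096 / sizeof(unsigned int); /* 1024 */
+		unsigned int before = ffs(dwords);     /* 11: HW sees 2^11 = 2048 */
+		unsigned int after  = ffs(dwords) - 1; /* 10: HW sees 2^10 = 1024 */
+	}
+
+With the old encoding the hardware believed the ring was twice its allocated
+size and could run past the end of the buffer.
+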
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+index 03bec765b03d..f9a1a4db9be7 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+@@ -184,6 +184,24 @@ int pqm_create_queue(struct process_queue_manager *pqm,
+
+ switch (type) {
+ case KFD_QUEUE_TYPE_SDMA:
++ if (dev->dqm->queue_count >=
++ CIK_SDMA_QUEUES_PER_ENGINE * CIK_SDMA_ENGINE_NUM) {
++ pr_err("Over-subscription is not allowed for SDMA.\n");
++ retval = -EPERM;
++ goto err_create_queue;
++ }
++
++ retval = create_cp_queue(pqm, dev, &q, properties, f, *qid);
++ if (retval != 0)
++ goto err_create_queue;
++ pqn->q = q;
++ pqn->kq = NULL;
++ retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
++ &q->properties.vmid);
++ pr_debug("DQM returned %d for create_queue\n", retval);
++ print_queue(q);
++ break;
++
+ case KFD_QUEUE_TYPE_COMPUTE:
+ /* check if there is over subscription */
+ if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
+diff --git a/drivers/gpu/drm/bridge/lvds-encoder.c b/drivers/gpu/drm/bridge/lvds-encoder.c
+index 0903ba574f61..75b0d3f6e4de 100644
+--- a/drivers/gpu/drm/bridge/lvds-encoder.c
++++ b/drivers/gpu/drm/bridge/lvds-encoder.c
+@@ -13,13 +13,37 @@
+
+ #include <linux/of_graph.h>
+
++struct lvds_encoder {
++ struct drm_bridge bridge;
++ struct drm_bridge *panel_bridge;
++};
++
++static int lvds_encoder_attach(struct drm_bridge *bridge)
++{
++ struct lvds_encoder *lvds_encoder = container_of(bridge,
++ struct lvds_encoder,
++ bridge);
++
++ return drm_bridge_attach(bridge->encoder, lvds_encoder->panel_bridge,
++ bridge);
++}
++
++static struct drm_bridge_funcs funcs = {
++ .attach = lvds_encoder_attach,
++};
++
+ static int lvds_encoder_probe(struct platform_device *pdev)
+ {
+ struct device_node *port;
+ struct device_node *endpoint;
+ struct device_node *panel_node;
+ struct drm_panel *panel;
+- struct drm_bridge *bridge;
++ struct lvds_encoder *lvds_encoder;
++
++ lvds_encoder = devm_kzalloc(&pdev->dev, sizeof(*lvds_encoder),
++ GFP_KERNEL);
++ if (!lvds_encoder)
++ return -ENOMEM;
+
+ /* Locate the panel DT node. */
+ port = of_graph_get_port_by_id(pdev->dev.of_node, 1);
+@@ -49,20 +73,30 @@ static int lvds_encoder_probe(struct platform_device *pdev)
+ return -EPROBE_DEFER;
+ }
+
+- bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_LVDS);
+- if (IS_ERR(bridge))
+- return PTR_ERR(bridge);
++ lvds_encoder->panel_bridge =
++ devm_drm_panel_bridge_add(&pdev->dev,
++ panel, DRM_MODE_CONNECTOR_LVDS);
++ if (IS_ERR(lvds_encoder->panel_bridge))
++ return PTR_ERR(lvds_encoder->panel_bridge);
++
++ /* The panel_bridge bridge is attached to the panel's of_node,
++ * but we need a bridge attached to our of_node for our user
++ * to look up.
++ */
++ lvds_encoder->bridge.of_node = pdev->dev.of_node;
++ lvds_encoder->bridge.funcs = &funcs;
++ drm_bridge_add(&lvds_encoder->bridge);
+
+- platform_set_drvdata(pdev, bridge);
++ platform_set_drvdata(pdev, lvds_encoder);
+
+ return 0;
+ }
+
+ static int lvds_encoder_remove(struct platform_device *pdev)
+ {
+- struct drm_bridge *bridge = platform_get_drvdata(pdev);
++ struct lvds_encoder *lvds_encoder = platform_get_drvdata(pdev);
+
+- drm_bridge_remove(bridge);
++ drm_bridge_remove(&lvds_encoder->bridge);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
+index 8571cfd877c5..8636e7eeb731 100644
+--- a/drivers/gpu/drm/bridge/tc358767.c
++++ b/drivers/gpu/drm/bridge/tc358767.c
+@@ -97,7 +97,7 @@
+ #define DP0_ACTIVEVAL 0x0650
+ #define DP0_SYNCVAL 0x0654
+ #define DP0_MISC 0x0658
+-#define TU_SIZE_RECOMMENDED (0x3f << 16) /* LSCLK cycles per TU */
++#define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */
+ #define BPC_6 (0 << 5)
+ #define BPC_8 (1 << 5)
+
+@@ -318,7 +318,7 @@ static ssize_t tc_aux_transfer(struct drm_dp_aux *aux,
+ tmp = (tmp << 8) | buf[i];
+ i++;
+ if (((i % 4) == 0) || (i == size)) {
+- tc_write(DP0_AUXWDATA(i >> 2), tmp);
++ tc_write(DP0_AUXWDATA((i - 1) >> 2), tmp);
+ tmp = 0;
+ }
+ }
+@@ -603,8 +603,15 @@ static int tc_get_display_props(struct tc_data *tc)
+ ret = drm_dp_link_probe(&tc->aux, &tc->link.base);
+ if (ret < 0)
+ goto err_dpcd_read;
+- if ((tc->link.base.rate != 162000) && (tc->link.base.rate != 270000))
+- goto err_dpcd_inval;
++ if (tc->link.base.rate != 162000 && tc->link.base.rate != 270000) {
++		dev_dbg(tc->dev, "Falling back to 2.7 Gbps rate\n");
++ tc->link.base.rate = 270000;
++ }
++
++ if (tc->link.base.num_lanes > 2) {
++		dev_dbg(tc->dev, "Falling back to 2 lanes\n");
++ tc->link.base.num_lanes = 2;
++ }
+
+ ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, tmp);
+ if (ret < 0)
+@@ -637,9 +644,6 @@ static int tc_get_display_props(struct tc_data *tc)
+ err_dpcd_read:
+ dev_err(tc->dev, "failed to read DPCD: %d\n", ret);
+ return ret;
+-err_dpcd_inval:
+- dev_err(tc->dev, "invalid DPCD\n");
+- return -EINVAL;
+ }
+
+ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
+@@ -655,6 +659,14 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
+ int lower_margin = mode->vsync_start - mode->vdisplay;
+ int vsync_len = mode->vsync_end - mode->vsync_start;
+
++ /*
++ * Recommended maximum number of symbols transferred in a transfer unit:
++ * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size,
++ * (output active video bandwidth in bytes))
++ * Must be less than tu_size.
++ */
++ max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
++
+ dev_dbg(tc->dev, "set mode %dx%d\n",
+ mode->hdisplay, mode->vdisplay);
+ dev_dbg(tc->dev, "H margin %d,%d sync %d\n",
+@@ -664,13 +676,18 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
+ dev_dbg(tc->dev, "total: %dx%d\n", mode->htotal, mode->vtotal);
+
+
+- /* LCD Ctl Frame Size */
+- tc_write(VPCTRL0, (0x40 << 20) /* VSDELAY */ |
++ /*
++ * LCD Ctl Frame Size
++	 * The datasheet is not clear about vsdelay in the DPI case;
++	 * assume no delay is needed when DPI is the source of the
++	 * sync signals.
++ */
++ tc_write(VPCTRL0, (0 << 20) /* VSDELAY */ |
+ OPXLFMT_RGB888 | FRMSYNC_DISABLED | MSF_DISABLED);
+- tc_write(HTIM01, (left_margin << 16) | /* H back porch */
+- (hsync_len << 0)); /* Hsync */
+- tc_write(HTIM02, (right_margin << 16) | /* H front porch */
+- (mode->hdisplay << 0)); /* width */
++ tc_write(HTIM01, (ALIGN(left_margin, 2) << 16) | /* H back porch */
++ (ALIGN(hsync_len, 2) << 0)); /* Hsync */
++ tc_write(HTIM02, (ALIGN(right_margin, 2) << 16) | /* H front porch */
++ (ALIGN(mode->hdisplay, 2) << 0)); /* width */
+ tc_write(VTIM01, (upper_margin << 16) | /* V back porch */
+ (vsync_len << 0)); /* Vsync */
+ tc_write(VTIM02, (lower_margin << 16) | /* V front porch */
+@@ -689,7 +706,7 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
+ /* DP Main Stream Attributes */
+ vid_sync_dly = hsync_len + left_margin + mode->hdisplay;
+ tc_write(DP0_VIDSYNCDELAY,
+- (0x003e << 16) | /* thresh_dly */
++ (max_tu_symbol << 16) | /* thresh_dly */
+ (vid_sync_dly << 0));
+
+ tc_write(DP0_TOTALVAL, (mode->vtotal << 16) | (mode->htotal));
+@@ -705,14 +722,8 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
+ tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
+ DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
+
+- /*
+- * Recommended maximum number of symbols transferred in a transfer unit:
+- * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size,
+- * (output active video bandwidth in bytes))
+- * Must be less than tu_size.
+- */
+- max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
+- tc_write(DP0_MISC, (max_tu_symbol << 23) | TU_SIZE_RECOMMENDED | BPC_8);
++ tc_write(DP0_MISC, (max_tu_symbol << 23) | (TU_SIZE_RECOMMENDED << 16) |
++ BPC_8);
+
+ return 0;
+ err:
+@@ -808,8 +819,6 @@ static int tc_main_link_setup(struct tc_data *tc)
+ unsigned int rate;
+ u32 dp_phy_ctrl;
+ int timeout;
+- bool aligned;
+- bool ready;
+ u32 value;
+ int ret;
+ u8 tmp[8];
+@@ -954,16 +963,15 @@ static int tc_main_link_setup(struct tc_data *tc)
+ ret = drm_dp_dpcd_read_link_status(aux, tmp + 2);
+ if (ret < 0)
+ goto err_dpcd_read;
+- ready = (tmp[2] == ((DP_CHANNEL_EQ_BITS << 4) | /* Lane1 */
+- DP_CHANNEL_EQ_BITS)); /* Lane0 */
+- aligned = tmp[4] & DP_INTERLANE_ALIGN_DONE;
+- } while ((--timeout) && !(ready && aligned));
++ } while ((--timeout) &&
++ !(drm_dp_channel_eq_ok(tmp + 2, tc->link.base.num_lanes)));
+
+ if (timeout == 0) {
+ /* Read DPCD 0x200-0x201 */
+ ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT, tmp, 2);
+ if (ret < 0)
+ goto err_dpcd_read;
++ dev_err(dev, "channel(s) EQ not ok\n");
+ dev_info(dev, "0x0200 SINK_COUNT: 0x%02x\n", tmp[0]);
+ dev_info(dev, "0x0201 DEVICE_SERVICE_IRQ_VECTOR: 0x%02x\n",
+ tmp[1]);
+@@ -974,10 +982,6 @@ static int tc_main_link_setup(struct tc_data *tc)
+ dev_info(dev, "0x0206 ADJUST_REQUEST_LANE0_1: 0x%02x\n",
+ tmp[6]);
+
+- if (!ready)
+- dev_err(dev, "Lane0/1 not ready\n");
+- if (!aligned)
+- dev_err(dev, "Lane0/1 not aligned\n");
+ return -EAGAIN;
+ }
+
+@@ -1099,7 +1103,10 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
+ static int tc_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+ {
+- /* Accept any mode */
++	/* DPI interface clock limitation: up to 154 MHz */
++ if (mode->clock > 154000)
++ return MODE_CLOCK_HIGH;
++
+ return MODE_OK;
+ }
+
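+
+With TU_SIZE_RECOMMENDED now the plain value 63 (shifted into place only when
+DP0_MISC is written), the max_tu_symbol comment can be made concrete. A
+worked example with hypothetical numbers, assuming a 148.5 MHz RGB888 stream
+on the 2-lane 2.7 Gbps link the driver now falls back to:
+
+	#include <linux/kernel.h>	/* DIV_ROUND_UP() */
+	#include <linux/types.h>
+
+	static u32 tu_symbol_example(void)
+	{
+		u32 in_bw   = 148500 * 3;  /* kB/s: 148.5 Mpix/s x 3 bytes/px */
+		u32 out_bw  = 2 * 270000;  /* kB/s: 2 lanes x 270 MB/s at HBR */
+		u32 tu_size = 63;
+
+		return DIV_ROUND_UP(in_bw * tu_size, out_bw); /* 52, < tu_size */
+	}
+
+The driver itself does not compute this; it simply programs the worst case,
+tu_size - 1 = 62, which satisfies the "must be less than tu_size" constraint
+by construction.
+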
+diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig
+index c226da145fb3..a349cb61961e 100644
+--- a/drivers/gpu/drm/omapdrm/displays/Kconfig
++++ b/drivers/gpu/drm/omapdrm/displays/Kconfig
+@@ -35,6 +35,7 @@ config DRM_OMAP_CONNECTOR_ANALOG_TV
+
+ config DRM_OMAP_PANEL_DPI
+ tristate "Generic DPI panel"
++ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Driver for generic DPI panels.
+
+diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+index 1dd3dafc59af..c60a85e82c6d 100644
+--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
++++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+@@ -638,7 +638,8 @@ static int omap_dmm_probe(struct platform_device *dev)
+ match = of_match_node(dmm_of_match, dev->dev.of_node);
+ if (!match) {
+ dev_err(&dev->dev, "failed to find matching device node\n");
+- return -ENODEV;
++ ret = -ENODEV;
++ goto fail;
+ }
+
+ omap_dmm->plat_data = match->data;
+diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
+index 9a20b9dc27c8..f7fc652b0027 100644
+--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
++++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
+@@ -1275,8 +1275,6 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master,
+ goto err_pllref;
+ }
+
+- pm_runtime_enable(dev);
+-
+ dsi->dsi_host.ops = &dw_mipi_dsi_host_ops;
+ dsi->dsi_host.dev = dev;
+ ret = mipi_dsi_host_register(&dsi->dsi_host);
+@@ -1291,6 +1289,7 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master,
+ }
+
+ dev_set_drvdata(dev, dsi);
++ pm_runtime_enable(dev);
+ return 0;
+
+ err_mipi_dsi_host:
+diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
+index 7d7af3a93d94..521addec831e 100644
+--- a/drivers/gpu/drm/vc4/vc4_irq.c
++++ b/drivers/gpu/drm/vc4/vc4_irq.c
+@@ -225,6 +225,9 @@ vc4_irq_uninstall(struct drm_device *dev)
+ /* Clear any pending interrupts we might have left. */
+ V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
+
++ /* Finish any interrupt handler still in flight. */
++ disable_irq(dev->irq);
++
+ cancel_work_sync(&vc4->overflow_mem_work);
+ }
+
+diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
+index 622cd43840b8..493f392b3a0a 100644
+--- a/drivers/gpu/drm/vc4/vc4_v3d.c
++++ b/drivers/gpu/drm/vc4/vc4_v3d.c
+@@ -327,6 +327,9 @@ static int vc4_v3d_runtime_resume(struct device *dev)
+ return ret;
+
+ vc4_v3d_init_hw(vc4->dev);
++
++ /* We disabled the IRQ as part of vc4_irq_uninstall in suspend. */
++ enable_irq(vc4->dev->irq);
+ vc4_irq_postinstall(vc4->dev);
+
+ return 0;
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index 906e654fb0ba..65f1cfbbe7fe 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -2340,23 +2340,23 @@ static void wacom_remote_destroy_one(struct wacom *wacom, unsigned int index)
+ int i;
+ unsigned long flags;
+
+- spin_lock_irqsave(&remote->remote_lock, flags);
+- remote->remotes[index].registered = false;
+- spin_unlock_irqrestore(&remote->remote_lock, flags);
++ for (i = 0; i < WACOM_MAX_REMOTES; i++) {
++ if (remote->remotes[i].serial == serial) {
+
+- if (remote->remotes[index].battery.battery)
+- devres_release_group(&wacom->hdev->dev,
+- &remote->remotes[index].battery.bat_desc);
++ spin_lock_irqsave(&remote->remote_lock, flags);
++ remote->remotes[i].registered = false;
++ spin_unlock_irqrestore(&remote->remote_lock, flags);
+
+- if (remote->remotes[index].group.name)
+- devres_release_group(&wacom->hdev->dev,
+- &remote->remotes[index]);
++ if (remote->remotes[i].battery.battery)
++ devres_release_group(&wacom->hdev->dev,
++ &remote->remotes[i].battery.bat_desc);
++
++ if (remote->remotes[i].group.name)
++ devres_release_group(&wacom->hdev->dev,
++ &remote->remotes[i]);
+
+- for (i = 0; i < WACOM_MAX_REMOTES; i++) {
+- if (remote->remotes[i].serial == serial) {
+ remote->remotes[i].serial = 0;
+ remote->remotes[i].group.name = NULL;
+- remote->remotes[i].registered = false;
+ remote->remotes[i].battery.battery = NULL;
+ wacom->led.groups[i].select = WACOM_STATUS_UNKNOWN;
+ }
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index aa692e28b2cd..70cbe1e5a3d2 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -1924,7 +1924,6 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
+ struct wacom_features *features = &wacom_wac->features;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
+ int i;
+- bool is_touch_on = value;
+ bool do_report = false;
+
+ /*
+@@ -1969,16 +1968,17 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
+ break;
+
+ case WACOM_HID_WD_MUTE_DEVICE:
+- if (wacom_wac->shared->touch_input && value) {
+- wacom_wac->shared->is_touch_on = !wacom_wac->shared->is_touch_on;
+- is_touch_on = wacom_wac->shared->is_touch_on;
+- }
+-
+- /* fall through*/
+ case WACOM_HID_WD_TOUCHONOFF:
+ if (wacom_wac->shared->touch_input) {
++ bool *is_touch_on = &wacom_wac->shared->is_touch_on;
++
++ if (equivalent_usage == WACOM_HID_WD_MUTE_DEVICE && value)
++ *is_touch_on = !(*is_touch_on);
++ else if (equivalent_usage == WACOM_HID_WD_TOUCHONOFF)
++ *is_touch_on = value;
++
+ input_report_switch(wacom_wac->shared->touch_input,
+- SW_MUTE_DEVICE, !is_touch_on);
++ SW_MUTE_DEVICE, !(*is_touch_on));
+ input_sync(wacom_wac->shared->touch_input);
+ }
+ break;
+diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
+index 52a58b8b6e1b..a139940cd991 100644
+--- a/drivers/hwmon/pmbus/pmbus_core.c
++++ b/drivers/hwmon/pmbus/pmbus_core.c
+@@ -21,6 +21,7 @@
+
+ #include <linux/debugfs.h>
+ #include <linux/kernel.h>
++#include <linux/math64.h>
+ #include <linux/module.h>
+ #include <linux/init.h>
+ #include <linux/err.h>
+@@ -499,8 +500,8 @@ static long pmbus_reg2data_linear(struct pmbus_data *data,
+ static long pmbus_reg2data_direct(struct pmbus_data *data,
+ struct pmbus_sensor *sensor)
+ {
+- long val = (s16) sensor->data;
+- long m, b, R;
++ s64 b, val = (s16)sensor->data;
++ s32 m, R;
+
+ m = data->info->m[sensor->class];
+ b = data->info->b[sensor->class];
+@@ -528,11 +529,12 @@ static long pmbus_reg2data_direct(struct pmbus_data *data,
+ R--;
+ }
+ while (R < 0) {
+- val = DIV_ROUND_CLOSEST(val, 10);
++ val = div_s64(val + 5LL, 10L); /* round closest */
+ R++;
+ }
+
+- return (val - b) / m;
++ val = div_s64(val - b, m);
++ return clamp_val(val, LONG_MIN, LONG_MAX);
+ }
+
+ /*
+@@ -656,7 +658,8 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
+ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
+ struct pmbus_sensor *sensor, long val)
+ {
+- long m, b, R;
++ s64 b, val64 = val;
++ s32 m, R;
+
+ m = data->info->m[sensor->class];
+ b = data->info->b[sensor->class];
+@@ -673,18 +676,18 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
+ R -= 3; /* Adjust R and b for data in milli-units */
+ b *= 1000;
+ }
+- val = val * m + b;
++ val64 = val64 * m + b;
+
+ while (R > 0) {
+- val *= 10;
++ val64 *= 10;
+ R--;
+ }
+ while (R < 0) {
+- val = DIV_ROUND_CLOSEST(val, 10);
++ val64 = div_s64(val64 + 5LL, 10L); /* round closest */
+ R++;
+ }
+
+- return val;
++ return (u16)clamp_val(val64, S16_MIN, S16_MAX);
+ }
+
+ static u16 pmbus_data2reg_vid(struct pmbus_data *data,
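+
+The widening to s64 in the two direct-format helpers above is not cosmetic:
+PMBus "direct" encoding is Y = (m * X + b) * 10^R, and the m * X product
+alone can overflow a 32-bit long. A minimal sketch with hypothetical
+coefficients (m = 21000, b = 0, R = -1), shaped like the reworked
+pmbus_data2reg_direct():
+
+	#include <linux/kernel.h>	/* clamp_val(), S16_MIN, S16_MAX */
+	#include <linux/math64.h>	/* div_s64() */
+
+	static u16 encode_direct(long x)	/* x in milli-units */
+	{
+		s64 y = (s64)x * 21000;	/* x = 300000 -> 6.3e9, past 32 bits */
+
+		y = div_s64(y + 5, 10);	/* R = -1: one /10, rounded closest */
+		return (u16)clamp_val(y, S16_MIN, S16_MAX);
+	}
+
+div_s64() also keeps the 64-bit division usable on 32-bit targets, where a
+plain 64-bit "/" would need libgcc helpers the kernel does not link.
+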
+diff --git a/drivers/i2c/i2c-boardinfo.c b/drivers/i2c/i2c-boardinfo.c
+index 31186ead5a40..509a6007cdf6 100644
+--- a/drivers/i2c/i2c-boardinfo.c
++++ b/drivers/i2c/i2c-boardinfo.c
+@@ -86,6 +86,7 @@ int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsig
+ property_entries_dup(info->properties);
+ if (IS_ERR(devinfo->board_info.properties)) {
+ status = PTR_ERR(devinfo->board_info.properties);
++ kfree(devinfo);
+ break;
+ }
+ }
+@@ -98,6 +99,7 @@ int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsig
+ GFP_KERNEL);
+ if (!devinfo->board_info.resources) {
+ status = -ENOMEM;
++ kfree(devinfo);
+ break;
+ }
+ }
+diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
+index 4df32cf1650e..172753b14a4f 100644
+--- a/drivers/iio/adc/stm32-adc.c
++++ b/drivers/iio/adc/stm32-adc.c
+@@ -1314,6 +1314,7 @@ static int stm32_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val)
+ {
+ struct stm32_adc *adc = iio_priv(indio_dev);
+ unsigned int watermark = STM32_DMA_BUFFER_SIZE / 2;
++ unsigned int rx_buf_sz = STM32_DMA_BUFFER_SIZE;
+
+ /*
+ * dma cyclic transfers are used, buffer is split into two periods.
+@@ -1322,7 +1323,7 @@ static int stm32_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val)
+ * - one buffer (period) driver can push with iio_trigger_poll().
+ */
+ watermark = min(watermark, val * (unsigned)(sizeof(u16)));
+- adc->rx_buf_sz = watermark * 2;
++ adc->rx_buf_sz = min(rx_buf_sz, watermark * 2 * adc->num_conv);
+
+ return 0;
+ }
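+
+The extra num_conv factor makes the cyclic DMA buffer scale with the number
+of scanned channels, so each half-buffer period holds complete scans. A
+sketch with hypothetical sizes (4 KiB DMA buffer, 4 channels, a requested
+watermark of 64 samples):
+
+	#include <linux/kernel.h>	/* min() */
+	#include <linux/types.h>	/* u16 */
+
+	static unsigned int sized_rx_buf(void)
+	{
+		unsigned int buf = 4096, num_conv = 4, val = 64;
+		unsigned int watermark = min(buf / 2,
+					     val * (unsigned int)sizeof(u16));
+						/* min(2048, 128) = 128 */
+		return min(buf, watermark * 2 * num_conv);
+						/* min(4096, 1024) = 1024 */
+	}
+
+Before the change the buffer size did not account for num_conv, so periods
+were not guaranteed to hold whole multi-channel scans.
+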
+diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c
+index 840a6cbd5f0f..8cfac6d1cec4 100644
+--- a/drivers/iio/chemical/ccs811.c
++++ b/drivers/iio/chemical/ccs811.c
+@@ -91,7 +91,6 @@ static const struct iio_chan_spec ccs811_channels[] = {
+ .channel2 = IIO_MOD_CO2,
+ .modified = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+- BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 0,
+ .scan_type = {
+@@ -245,24 +244,18 @@ static int ccs811_read_raw(struct iio_dev *indio_dev,
+ switch (chan->channel2) {
+ case IIO_MOD_CO2:
+ *val = 0;
+- *val2 = 12834;
++ *val2 = 100;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_MOD_VOC:
+ *val = 0;
+- *val2 = 84246;
+- return IIO_VAL_INT_PLUS_MICRO;
++ *val2 = 100;
++ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+- case IIO_CHAN_INFO_OFFSET:
+- if (!(chan->type == IIO_CONCENTRATION &&
+- chan->channel2 == IIO_MOD_CO2))
+- return -EINVAL;
+- *val = -400;
+- return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
+index 141ea228aac6..f5954981e9ee 100644
+--- a/drivers/input/rmi4/rmi_driver.c
++++ b/drivers/input/rmi4/rmi_driver.c
+@@ -41,6 +41,13 @@ void rmi_free_function_list(struct rmi_device *rmi_dev)
+
+ rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Freeing function list\n");
+
++ /* Doing it in the reverse order so F01 will be removed last */
++ list_for_each_entry_safe_reverse(fn, tmp,
++ &data->function_list, node) {
++ list_del(&fn->node);
++ rmi_unregister_function(fn);
++ }
++
+ devm_kfree(&rmi_dev->dev, data->irq_memory);
+ data->irq_memory = NULL;
+ data->irq_status = NULL;
+@@ -50,13 +57,6 @@ void rmi_free_function_list(struct rmi_device *rmi_dev)
+
+ data->f01_container = NULL;
+ data->f34_container = NULL;
+-
+- /* Doing it in the reverse order so F01 will be removed last */
+- list_for_each_entry_safe_reverse(fn, tmp,
+- &data->function_list, node) {
+- list_del(&fn->node);
+- rmi_unregister_function(fn);
+- }
+ }
+
+ static int reset_one_function(struct rmi_function *fn)
+diff --git a/drivers/input/rmi4/rmi_f03.c b/drivers/input/rmi4/rmi_f03.c
+index ad71a5e768dc..7ccbb370a9a8 100644
+--- a/drivers/input/rmi4/rmi_f03.c
++++ b/drivers/input/rmi4/rmi_f03.c
+@@ -32,6 +32,7 @@ struct f03_data {
+ struct rmi_function *fn;
+
+ struct serio *serio;
++ bool serio_registered;
+
+ unsigned int overwrite_buttons;
+
+@@ -138,6 +139,37 @@ static int rmi_f03_initialize(struct f03_data *f03)
+ return 0;
+ }
+
++static int rmi_f03_pt_open(struct serio *serio)
++{
++ struct f03_data *f03 = serio->port_data;
++ struct rmi_function *fn = f03->fn;
++ const u8 ob_len = f03->rx_queue_length * RMI_F03_OB_SIZE;
++ const u16 data_addr = fn->fd.data_base_addr + RMI_F03_OB_OFFSET;
++ u8 obs[RMI_F03_QUEUE_LENGTH * RMI_F03_OB_SIZE];
++ int error;
++
++ /*
++ * Consume any pending data. Some devices like to spam with
++ * 0xaa 0x00 announcements which may confuse us as we try to
++ * probe the device.
++ */
++ error = rmi_read_block(fn->rmi_dev, data_addr, &obs, ob_len);
++ if (!error)
++ rmi_dbg(RMI_DEBUG_FN, &fn->dev,
++ "%s: Consumed %*ph (%d) from PS2 guest\n",
++ __func__, ob_len, obs, ob_len);
++
++ return fn->rmi_dev->driver->set_irq_bits(fn->rmi_dev, fn->irq_mask);
++}
++
++static void rmi_f03_pt_close(struct serio *serio)
++{
++ struct f03_data *f03 = serio->port_data;
++ struct rmi_function *fn = f03->fn;
++
++ fn->rmi_dev->driver->clear_irq_bits(fn->rmi_dev, fn->irq_mask);
++}
++
+ static int rmi_f03_register_pt(struct f03_data *f03)
+ {
+ struct serio *serio;
+@@ -148,6 +180,8 @@ static int rmi_f03_register_pt(struct f03_data *f03)
+
+ serio->id.type = SERIO_PS_PSTHRU;
+ serio->write = rmi_f03_pt_write;
++ serio->open = rmi_f03_pt_open;
++ serio->close = rmi_f03_pt_close;
+ serio->port_data = f03;
+
+ strlcpy(serio->name, "Synaptics RMI4 PS/2 pass-through",
+@@ -184,17 +218,27 @@ static int rmi_f03_probe(struct rmi_function *fn)
+ f03->device_count);
+
+ dev_set_drvdata(dev, f03);
+-
+- error = rmi_f03_register_pt(f03);
+- if (error)
+- return error;
+-
+ return 0;
+ }
+
+ static int rmi_f03_config(struct rmi_function *fn)
+ {
+- fn->rmi_dev->driver->set_irq_bits(fn->rmi_dev, fn->irq_mask);
++ struct f03_data *f03 = dev_get_drvdata(&fn->dev);
++ int error;
++
++ if (!f03->serio_registered) {
++ error = rmi_f03_register_pt(f03);
++ if (error)
++ return error;
++
++ f03->serio_registered = true;
++ } else {
++ /*
++		 * We must be re-configuring the sensor; just enable
++ * interrupts for this function.
++ */
++ fn->rmi_dev->driver->set_irq_bits(fn->rmi_dev, fn->irq_mask);
++ }
+
+ return 0;
+ }
+@@ -204,7 +248,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
+ struct f03_data *f03 = dev_get_drvdata(&fn->dev);
+- u16 data_addr = fn->fd.data_base_addr;
++ const u16 data_addr = fn->fd.data_base_addr + RMI_F03_OB_OFFSET;
+ const u8 ob_len = f03->rx_queue_length * RMI_F03_OB_SIZE;
+ u8 obs[RMI_F03_QUEUE_LENGTH * RMI_F03_OB_SIZE];
+ u8 ob_status;
+@@ -226,8 +270,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
+ drvdata->attn_data.size -= ob_len;
+ } else {
+ /* Grab all of the data registers, and check them for data */
+- error = rmi_read_block(fn->rmi_dev, data_addr + RMI_F03_OB_OFFSET,
+- &obs, ob_len);
++ error = rmi_read_block(fn->rmi_dev, data_addr, &obs, ob_len);
+ if (error) {
+ dev_err(&fn->dev,
+ "%s: Failed to read F03 output buffers: %d\n",
+@@ -266,7 +309,8 @@ static void rmi_f03_remove(struct rmi_function *fn)
+ {
+ struct f03_data *f03 = dev_get_drvdata(&fn->dev);
+
+- serio_unregister_port(f03->serio);
++ if (f03->serio_registered)
++ serio_unregister_port(f03->serio);
+ }
+
+ struct rmi_function_handler rmi_f03_handler = {
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 658c54b3b07a..1598d1e04989 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -807,7 +807,10 @@ int bch_btree_cache_alloc(struct cache_set *c)
+ c->shrink.scan_objects = bch_mca_scan;
+ c->shrink.seeks = 4;
+ c->shrink.batch = c->btree_pages * 2;
+- register_shrinker(&c->shrink);
++
++ if (register_shrinker(&c->shrink))
++ pr_warn("bcache: %s: could not register shrinker",
++ __func__);
+
+ return 0;
+ }
+diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c
+index f06f09a0876e..71fb5734995b 100644
+--- a/drivers/media/usb/usbtv/usbtv-core.c
++++ b/drivers/media/usb/usbtv/usbtv-core.c
+@@ -144,6 +144,7 @@ static void usbtv_disconnect(struct usb_interface *intf)
+
+ static const struct usb_device_id usbtv_id_table[] = {
+ { USB_DEVICE(0x1b71, 0x3002) },
++ { USB_DEVICE(0x1f71, 0x3301) },
+ {}
+ };
+ MODULE_DEVICE_TABLE(usb, usbtv_id_table);
+diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
+index 78b3172c8e6e..d46cb1f0868f 100644
+--- a/drivers/misc/mei/pci-me.c
++++ b/drivers/misc/mei/pci-me.c
+@@ -238,8 +238,11 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ */
+ mei_me_set_pm_domain(dev);
+
+- if (mei_pg_is_enabled(dev))
++ if (mei_pg_is_enabled(dev)) {
+ pm_runtime_put_noidle(&pdev->dev);
++ if (hw->d0i3_supported)
++ pm_runtime_allow(&pdev->dev);
++ }
+
+ dev_dbg(&pdev->dev, "initialization successful.\n");
+
+diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
+index 81370c79aa48..7ad0db65a6fa 100644
+--- a/drivers/mtd/nand/denali_pci.c
++++ b/drivers/mtd/nand/denali_pci.c
+@@ -124,3 +124,7 @@ static struct pci_driver denali_pci_driver = {
+ };
+
+ module_pci_driver(denali_pci_driver);
++
++MODULE_DESCRIPTION("PCI driver for Denali NAND controller");
++MODULE_AUTHOR("Intel Corporation and its suppliers");
++MODULE_LICENSE("GPL v2");
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+index 3cbe771b3352..a22336fef66b 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -2133,8 +2133,8 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
+ /* Read A2 portion of the EEPROM */
+ if (length) {
+ start -= ETH_MODULE_SFF_8436_LEN;
+- bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1, start,
+- length, data);
++ rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1,
++ start, length, data);
+ }
+ return rc;
+ }
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 667dbc7d4a4e..d1a44a84c97e 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -3331,7 +3331,7 @@ static int __igb_close(struct net_device *netdev, bool suspending)
+
+ int igb_close(struct net_device *netdev)
+ {
+- if (netif_device_present(netdev))
++ if (netif_device_present(netdev) || netdev->dismantle)
+ return __igb_close(netdev, false);
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
+index d147dc7d0f77..1dd3a1264a53 100644
+--- a/drivers/net/ethernet/marvell/mvpp2.c
++++ b/drivers/net/ethernet/marvell/mvpp2.c
+@@ -5597,7 +5597,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
+ sizeof(*txq_pcpu->buffs),
+ GFP_KERNEL);
+ if (!txq_pcpu->buffs)
+- goto cleanup;
++ return -ENOMEM;
+
+ txq_pcpu->count = 0;
+ txq_pcpu->reserved_num = 0;
+@@ -5610,26 +5610,10 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
+ &txq_pcpu->tso_headers_dma,
+ GFP_KERNEL);
+ if (!txq_pcpu->tso_headers)
+- goto cleanup;
++ return -ENOMEM;
+ }
+
+ return 0;
+-cleanup:
+- for_each_present_cpu(cpu) {
+- txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+- kfree(txq_pcpu->buffs);
+-
+- dma_free_coherent(port->dev->dev.parent,
+- txq_pcpu->size * TSO_HEADER_SIZE,
+- txq_pcpu->tso_headers,
+- txq_pcpu->tso_headers_dma);
+- }
+-
+- dma_free_coherent(port->dev->dev.parent,
+- txq->size * MVPP2_DESC_ALIGNED_SIZE,
+- txq->descs, txq->descs_dma);
+-
+- return -ENOMEM;
+ }
+
+ /* Free allocated TXQ resources */
+diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
+index 6d68c8a8f4f2..da4ec575ccf9 100644
+--- a/drivers/net/ethernet/xilinx/Kconfig
++++ b/drivers/net/ethernet/xilinx/Kconfig
+@@ -34,6 +34,7 @@ config XILINX_AXI_EMAC
+ config XILINX_LL_TEMAC
+ tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
+ depends on (PPC || MICROBLAZE)
++ depends on !64BIT || BROKEN
+ select PHYLIB
+ ---help---
+ This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
+diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
+index aebc08beceba..21b3f36e023a 100644
+--- a/drivers/net/phy/marvell10g.c
++++ b/drivers/net/phy/marvell10g.c
+@@ -16,6 +16,7 @@
+ * link takes priority and the other port is completely locked out.
+ */
+ #include <linux/phy.h>
++#include <linux/marvell_phy.h>
+
+ enum {
+ MV_PCS_BASE_T = 0x0000,
+@@ -338,7 +339,7 @@ static int mv3310_read_status(struct phy_device *phydev)
+ static struct phy_driver mv3310_drivers[] = {
+ {
+ .phy_id = 0x002b09aa,
+- .phy_id_mask = 0xffffffff,
++ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .name = "mv88x3310",
+ .features = SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Full |
+@@ -360,7 +361,7 @@ static struct phy_driver mv3310_drivers[] = {
+ module_phy_driver(mv3310_drivers);
+
+ static struct mdio_device_id __maybe_unused mv3310_tbl[] = {
+- { 0x002b09aa, 0xffffffff },
++ { 0x002b09aa, MARVELL_PHY_ID_MASK },
+ { },
+ };
+ MODULE_DEVICE_TABLE(mdio, mv3310_tbl);
+diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
+index f0439f2d566b..173891b11b2d 100644
+--- a/drivers/net/wireless/ath/ath9k/channel.c
++++ b/drivers/net/wireless/ath/ath9k/channel.c
+@@ -1112,7 +1112,7 @@ ath_chanctx_send_vif_ps_frame(struct ath_softc *sc, struct ath_vif *avp,
+ if (!avp->assoc)
+ return false;
+
+- skb = ieee80211_nullfunc_get(sc->hw, vif);
++ skb = ieee80211_nullfunc_get(sc->hw, vif, false);
+ if (!skb)
+ return false;
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+index 9c889a32fe24..223fb77a3aa9 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
++++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+@@ -209,8 +209,6 @@ static inline void iwl_fw_dbg_stop_recording(struct iwl_fw_runtime *fwrt)
+
+ static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt)
+ {
+- iwl_fw_dbg_stop_recording(fwrt);
+-
+ fwrt->dump.conf = FW_DBG_INVALID;
+ }
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+index 53e269d54050..0ae7624eac9d 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+@@ -1181,6 +1181,8 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
+ return le32_to_cpu(txq_timer->p2p_go);
+ case NL80211_IFTYPE_P2P_DEVICE:
+ return le32_to_cpu(txq_timer->p2p_device);
++ case NL80211_IFTYPE_MONITOR:
++ return default_timeout;
+ default:
+ WARN_ON(1);
+ return mvm->cfg->base_params->wd_timeout;
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+index c59f4581e972..ac05fd1e74c4 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+@@ -49,6 +49,7 @@
+ *
+ *****************************************************************************/
+ #include "iwl-trans.h"
++#include "iwl-prph.h"
+ #include "iwl-context-info.h"
+ #include "internal.h"
+
+@@ -156,6 +157,11 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
+
+ trans_pcie->is_down = true;
+
++ /* Stop dbgc before stopping device */
++ iwl_write_prph(trans, DBGC_IN_SAMPLE, 0);
++ udelay(100);
++ iwl_write_prph(trans, DBGC_OUT_CTRL, 0);
++
+ /* tell the device to stop sending interrupts */
+ iwl_disable_interrupts(trans);
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+index 2e3e013ec95a..12a9b86d71ea 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -1138,6 +1138,15 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
+
+ trans_pcie->is_down = true;
+
++ /* Stop dbgc before stopping device */
++ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
++ iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100);
++ } else {
++ iwl_write_prph(trans, DBGC_IN_SAMPLE, 0);
++ udelay(100);
++ iwl_write_prph(trans, DBGC_OUT_CTRL, 0);
++ }
++
+ /* tell the device to stop sending interrupts */
+ iwl_disable_interrupts(trans);
+
+diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c
+index a52224836a2b..666b88cb2cfe 100644
+--- a/drivers/net/wireless/st/cw1200/sta.c
++++ b/drivers/net/wireless/st/cw1200/sta.c
+@@ -198,7 +198,7 @@ void __cw1200_cqm_bssloss_sm(struct cw1200_common *priv,
+
+ priv->bss_loss_state++;
+
+- skb = ieee80211_nullfunc_get(priv->hw, priv->vif);
++ skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false);
+ WARN_ON(!skb);
+ if (skb)
+ cw1200_tx(priv->hw, NULL, skb);
+@@ -2266,7 +2266,7 @@ static int cw1200_upload_null(struct cw1200_common *priv)
+ .rate = 0xFF,
+ };
+
+- frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif);
++ frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false);
+ if (!frame.skb)
+ return -ENOMEM;
+
+diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
+index 9915d83a4a30..6d02c660b4ab 100644
+--- a/drivers/net/wireless/ti/wl1251/main.c
++++ b/drivers/net/wireless/ti/wl1251/main.c
+@@ -566,7 +566,7 @@ static int wl1251_build_null_data(struct wl1251 *wl)
+ size = sizeof(struct wl12xx_null_data_template);
+ ptr = NULL;
+ } else {
+- skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
++ skb = ieee80211_nullfunc_get(wl->hw, wl->vif, false);
+ if (!skb)
+ goto out;
+ size = skb->len;
+diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
+index 2bfc12fdc929..761cf8573a80 100644
+--- a/drivers/net/wireless/ti/wlcore/cmd.c
++++ b/drivers/net/wireless/ti/wlcore/cmd.c
+@@ -1069,7 +1069,8 @@ int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+ ptr = NULL;
+ } else {
+ skb = ieee80211_nullfunc_get(wl->hw,
+- wl12xx_wlvif_to_vif(wlvif));
++ wl12xx_wlvif_to_vif(wlvif),
++ false);
+ if (!skb)
+ goto out;
+ size = skb->len;
+@@ -1096,7 +1097,7 @@ int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
+ struct sk_buff *skb = NULL;
+ int ret = -ENOMEM;
+
+- skb = ieee80211_nullfunc_get(wl->hw, vif);
++ skb = ieee80211_nullfunc_get(wl->hw, vif, false);
+ if (!skb)
+ goto out;
+
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index 8b8689c6d887..391432e2725d 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -87,6 +87,8 @@ struct netfront_cb {
+ /* IRQ name is queue name with "-tx" or "-rx" appended */
+ #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
+
++static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
++
+ struct netfront_stats {
+ u64 packets;
+ u64 bytes;
+@@ -2021,10 +2023,12 @@ static void netback_changed(struct xenbus_device *dev,
+ break;
+
+ case XenbusStateClosed:
++ wake_up_all(&module_unload_q);
+ if (dev->state == XenbusStateClosed)
+ break;
+ /* Missed the backend's CLOSING state -- fallthrough */
+ case XenbusStateClosing:
++ wake_up_all(&module_unload_q);
+ xenbus_frontend_closed(dev);
+ break;
+ }
+@@ -2130,6 +2134,20 @@ static int xennet_remove(struct xenbus_device *dev)
+
+ dev_dbg(&dev->dev, "%s\n", dev->nodename);
+
++ if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
++ xenbus_switch_state(dev, XenbusStateClosing);
++ wait_event(module_unload_q,
++ xenbus_read_driver_state(dev->otherend) ==
++ XenbusStateClosing);
++
++ xenbus_switch_state(dev, XenbusStateClosed);
++ wait_event(module_unload_q,
++ xenbus_read_driver_state(dev->otherend) ==
++ XenbusStateClosed ||
++ xenbus_read_driver_state(dev->otherend) ==
++ XenbusStateUnknown);
++ }
++
+ xennet_disconnect_backend(info);
+
+ unregister_netdev(info->netdev);
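+
+The unload handshake added above is the stock xenbus pattern: the frontend
+walks Closing -> Closed while the otherend watch wakes the waiter on every
+backend transition. Condensed to a skeleton with hypothetical names (error
+and timeout handling omitted):
+
+	#include <linux/wait.h>
+	#include <xen/xenbus.h>
+
+	static DECLARE_WAIT_QUEUE_HEAD(unload_q);
+
+	/* xenbus watch callback: fires on every backend state change */
+	static void backend_changed(struct xenbus_device *dev,
+				    enum xenbus_state backend_state)
+	{
+		if (backend_state == XenbusStateClosing ||
+		    backend_state == XenbusStateClosed)
+			wake_up_all(&unload_q);
+	}
+
+	static void sync_teardown(struct xenbus_device *dev)
+	{
+		xenbus_switch_state(dev, XenbusStateClosing);
+		wait_event(unload_q,
+			   xenbus_read_driver_state(dev->otherend) ==
+			   XenbusStateClosing);
+
+		xenbus_switch_state(dev, XenbusStateClosed);
+		wait_event(unload_q,
+			   xenbus_read_driver_state(dev->otherend) ==
+			   XenbusStateClosed);
+	}
+
+The real hunk additionally accepts XenbusStateUnknown in the second wait, for
+the case where the backend node disappears entirely.
+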
+diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
+index bf33663218cd..9ff8529a64a9 100644
+--- a/drivers/nvme/host/fabrics.h
++++ b/drivers/nvme/host/fabrics.h
+@@ -142,4 +142,34 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts);
+ int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
+ bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
+
++static inline blk_status_t nvmf_check_init_req(struct nvme_ctrl *ctrl,
++ struct request *rq)
++{
++ struct nvme_command *cmd = nvme_req(rq)->cmd;
++
++ /*
++ * We cannot accept any other command until the connect command has
++ * completed, so only allow connect to pass.
++ */
++ if (!blk_rq_is_passthrough(rq) ||
++ cmd->common.opcode != nvme_fabrics_command ||
++ cmd->fabrics.fctype != nvme_fabrics_type_connect) {
++ /*
++ * Reconnecting state means transport disruption, which can take
++		 * a long time and might even fail permanently; fail fast to
++		 * give upper layers a chance to fail over.
++		 * Deleting state means that the ctrl will never accept commands
++		 * again; fail it permanently.
++ */
++ if (ctrl->state == NVME_CTRL_RECONNECTING ||
++ ctrl->state == NVME_CTRL_DELETING) {
++ nvme_req(rq)->status = NVME_SC_ABORT_REQ;
++ return BLK_STS_IOERR;
++ }
++ return BLK_STS_RESOURCE; /* try again later */
++ }
++
++ return BLK_STS_OK;
++}
++
+ #endif /* _NVME_FABRICS_H */
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
+index be49d0f79381..3148d760d825 100644
+--- a/drivers/nvme/host/fc.c
++++ b/drivers/nvme/host/fc.c
+@@ -41,6 +41,7 @@
+
+ enum nvme_fc_queue_flags {
+ NVME_FC_Q_CONNECTED = (1 << 0),
++ NVME_FC_Q_LIVE = (1 << 1),
+ };
+
+ #define NVMEFC_QUEUE_DELAY 3 /* ms units */
+@@ -1654,6 +1655,7 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue)
+ if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
+ return;
+
++ clear_bit(NVME_FC_Q_LIVE, &queue->flags);
+ /*
+ * Current implementation never disconnects a single queue.
+ * It always terminates a whole association. So there is never
+@@ -1661,7 +1663,6 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue)
+ */
+
+ queue->connection_id = 0;
+- clear_bit(NVME_FC_Q_CONNECTED, &queue->flags);
+ }
+
+ static void
+@@ -1740,6 +1741,8 @@ nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
+ ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
+ if (ret)
+ break;
++
++ set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
+ }
+
+ return ret;
+@@ -2048,6 +2051,14 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
+ return BLK_STS_RESOURCE;
+ }
+
++static inline blk_status_t nvme_fc_is_ready(struct nvme_fc_queue *queue,
++ struct request *rq)
++{
++ if (unlikely(!test_bit(NVME_FC_Q_LIVE, &queue->flags)))
++ return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
++ return BLK_STS_OK;
++}
++
+ static blk_status_t
+ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+@@ -2063,6 +2074,10 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
+ u32 data_len;
+ blk_status_t ret;
+
++ ret = nvme_fc_is_ready(queue, rq);
++ if (unlikely(ret))
++ return ret;
++
+ ret = nvme_setup_cmd(ns, rq, sqe);
+ if (ret)
+ return ret;
+@@ -2398,6 +2413,8 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
+ if (ret)
+ goto out_disconnect_admin_queue;
+
++ set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
++
+ /*
+ * Check controller capabilities
+ *
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 75539f7c58b9..cdd2fd509ddc 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -1617,6 +1617,7 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
+ dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
+ dev->host_mem_descs, dev->host_mem_descs_dma);
+ dev->host_mem_descs = NULL;
++ dev->nr_host_mem_descs = 0;
+ }
+
+ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
+@@ -1645,7 +1646,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
+ if (!bufs)
+ goto out_free_descs;
+
+- for (size = 0; size < preferred; size += len) {
++ for (size = 0; size < preferred && i < max_entries; size += len) {
+ dma_addr_t dma_addr;
+
+ len = min_t(u64, chunk_size, preferred - size);
+@@ -2282,7 +2283,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
+ return -ENODEV;
+ }
+
+-static unsigned long check_dell_samsung_bug(struct pci_dev *pdev)
++static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
+ {
+ if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
+ /*
+@@ -2297,6 +2298,14 @@ static unsigned long check_dell_samsung_bug(struct pci_dev *pdev)
+ (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
+ dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
+ return NVME_QUIRK_NO_DEEPEST_PS;
++ } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) {
++ /*
++ * Samsung SSD 960 EVO drops off the PCIe bus after system
++ * suspend on a Ryzen board, ASUS PRIME B350M-A.
++ */
++ if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
++ dmi_match(DMI_BOARD_NAME, "PRIME B350M-A"))
++ return NVME_QUIRK_NO_APST;
+ }
+
+ return 0;
+@@ -2336,7 +2345,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ if (result)
+ goto unmap;
+
+- quirks |= check_dell_samsung_bug(pdev);
++ quirks |= check_vendor_combination_bug(pdev);
+
+ result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
+ quirks);
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index 0ebb539f3bd3..33d4431c2b4b 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -67,6 +67,9 @@ struct nvme_rdma_request {
+ struct nvme_request req;
+ struct ib_mr *mr;
+ struct nvme_rdma_qe sqe;
++ union nvme_result result;
++ __le16 status;
++ refcount_t ref;
+ struct ib_sge sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
+ u32 num_sge;
+ int nents;
+@@ -1177,6 +1180,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
+ req->num_sge = 1;
+ req->inline_data = false;
+ req->mr->need_inval = false;
++ refcount_set(&req->ref, 2); /* send and recv completions */
+
+ c->common.flags |= NVME_CMD_SGL_METABUF;
+
+@@ -1213,8 +1217,19 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
+
+ static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
+ {
+- if (unlikely(wc->status != IB_WC_SUCCESS))
++ struct nvme_rdma_qe *qe =
++ container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
++ struct nvme_rdma_request *req =
++ container_of(qe, struct nvme_rdma_request, sqe);
++ struct request *rq = blk_mq_rq_from_pdu(req);
++
++ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ nvme_rdma_wr_error(cq, wc, "SEND");
++ return;
++ }
++
++ if (refcount_dec_and_test(&req->ref))
++ nvme_end_request(rq, req->status, req->result);
+ }
+
+ /*
+@@ -1359,14 +1374,19 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
+ }
+ req = blk_mq_rq_to_pdu(rq);
+
+- if (rq->tag == tag)
+- ret = 1;
++ req->status = cqe->status;
++ req->result = cqe->result;
+
+ if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) &&
+ wc->ex.invalidate_rkey == req->mr->rkey)
+ req->mr->need_inval = false;
+
+- nvme_end_request(rq, cqe->status, cqe->result);
++ if (refcount_dec_and_test(&req->ref)) {
++ if (rq->tag == tag)
++ ret = 1;
++ nvme_end_request(rq, req->status, req->result);
++ }
++
+ return ret;
+ }
+
+@@ -1603,31 +1623,11 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
+ * We cannot accept any other command until the Connect command has completed.
+ */
+ static inline blk_status_t
+-nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
+-{
+- if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
+- struct nvme_command *cmd = nvme_req(rq)->cmd;
+-
+- if (!blk_rq_is_passthrough(rq) ||
+- cmd->common.opcode != nvme_fabrics_command ||
+- cmd->fabrics.fctype != nvme_fabrics_type_connect) {
+- /*
+- * reconnecting state means transport disruption, which
+- * can take a long time and even might fail permanently,
+- * fail fast to give upper layers a chance to failover.
+- * deleting state means that the ctrl will never accept
+- * commands again, fail it permanently.
+- */
+- if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING ||
+- queue->ctrl->ctrl.state == NVME_CTRL_DELETING) {
+- nvme_req(rq)->status = NVME_SC_ABORT_REQ;
+- return BLK_STS_IOERR;
+- }
+- return BLK_STS_RESOURCE; /* try again later */
+- }
+- }
+-
+- return 0;
++nvme_rdma_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
++{
++ if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags)))
++ return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
++ return BLK_STS_OK;
+ }
+
+ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
+@@ -1646,7 +1646,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
+
+ WARN_ON_ONCE(rq->tag < 0);
+
+- ret = nvme_rdma_queue_is_ready(queue, rq);
++ ret = nvme_rdma_is_ready(queue, rq);
+ if (unlikely(ret))
+ return ret;
+
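+
+The refcount_set(&req->ref, 2) introduced in the map path is the crux of this
+hunk: a request now has two completion sources, the SEND CQE and the RECV CQE
+carrying the NVMe completion, and it may only be ended after both have fired.
+The shape of the pattern, reduced to hypothetical helpers:
+
+	#include <linux/refcount.h>
+
+	struct two_phase_req {
+		refcount_t ref;			/* one per completion source */
+	};
+
+	static void finish_request(struct two_phase_req *req)
+	{
+		/* end the block request here; status/result were saved
+		 * earlier by the receive path
+		 */
+	}
+
+	static void req_start(struct two_phase_req *req)
+	{
+		refcount_set(&req->ref, 2);	/* SEND CQE + RECV CQE */
+	}
+
+	/* called from both the send and the receive completion handlers */
+	static void req_put(struct two_phase_req *req)
+	{
+		if (refcount_dec_and_test(&req->ref))
+			finish_request(req);
+	}
+
+Without the second reference, a fast NVMe completion could end and recycle
+the request while the local SEND work request was still in flight.
+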
+diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
+index 58e010bdda3e..8e21211b904b 100644
+--- a/drivers/nvme/target/fc.c
++++ b/drivers/nvme/target/fc.c
+@@ -532,15 +532,15 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
+
+ tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
+
++ /* release the queue lookup reference on the completed IO */
++ nvmet_fc_tgt_q_put(queue);
++
+ spin_lock_irqsave(&queue->qlock, flags);
+ deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
+ struct nvmet_fc_defer_fcp_req, req_list);
+ if (!deferfcp) {
+ list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
+ spin_unlock_irqrestore(&queue->qlock, flags);
+-
+- /* Release reference taken at queue lookup and fod allocation */
+- nvmet_fc_tgt_q_put(queue);
+ return;
+ }
+
+@@ -759,6 +759,9 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
+ tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
+ deferfcp->fcp_req);
+
++ /* release the queue lookup reference */
++ nvmet_fc_tgt_q_put(queue);
++
+ kfree(deferfcp);
+
+ spin_lock_irqsave(&queue->qlock, flags);
+diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
+index 92628c432926..02aff5cc48bf 100644
+--- a/drivers/nvme/target/loop.c
++++ b/drivers/nvme/target/loop.c
+@@ -61,10 +61,15 @@ static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
+ return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
+ }
+
++enum nvme_loop_queue_flags {
++ NVME_LOOP_Q_LIVE = 0,
++};
++
+ struct nvme_loop_queue {
+ struct nvmet_cq nvme_cq;
+ struct nvmet_sq nvme_sq;
+ struct nvme_loop_ctrl *ctrl;
++ unsigned long flags;
+ };
+
+ static struct nvmet_port *nvmet_loop_port;
+@@ -153,6 +158,14 @@ nvme_loop_timeout(struct request *rq, bool reserved)
+ return BLK_EH_HANDLED;
+ }
+
++static inline blk_status_t nvme_loop_is_ready(struct nvme_loop_queue *queue,
++ struct request *rq)
++{
++ if (unlikely(!test_bit(NVME_LOOP_Q_LIVE, &queue->flags)))
++ return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
++ return BLK_STS_OK;
++}
++
+ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+ {
+@@ -162,6 +175,10 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
+ struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
+ blk_status_t ret;
+
++ ret = nvme_loop_is_ready(queue, req);
++ if (unlikely(ret))
++ return ret;
++
+ ret = nvme_setup_cmd(ns, req, &iod->cmd);
+ if (ret)
+ return ret;
+@@ -275,6 +292,7 @@ static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
+
+ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
+ {
++ clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
+ nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
+ blk_cleanup_queue(ctrl->ctrl.admin_q);
+ blk_mq_free_tag_set(&ctrl->admin_tag_set);
+@@ -305,8 +323,10 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
+ {
+ int i;
+
+- for (i = 1; i < ctrl->ctrl.queue_count; i++)
++ for (i = 1; i < ctrl->ctrl.queue_count; i++) {
++ clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
+ nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
++ }
+ }
+
+ static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
+@@ -346,6 +366,7 @@ static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
+ ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
+ if (ret)
+ return ret;
++ set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
+ }
+
+ return 0;
+@@ -387,6 +408,8 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
+ if (error)
+ goto out_cleanup_queue;
+
++ set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
++
+ error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
+ if (error) {
+ dev_err(ctrl->ctrl.device,
+diff --git a/drivers/power/reset/zx-reboot.c b/drivers/power/reset/zx-reboot.c
+index 7549c7f74a3c..c03e96e6a041 100644
+--- a/drivers/power/reset/zx-reboot.c
++++ b/drivers/power/reset/zx-reboot.c
+@@ -82,3 +82,7 @@ static struct platform_driver zx_reboot_driver = {
+ },
+ };
+ module_platform_driver(zx_reboot_driver);
++
++MODULE_DESCRIPTION("ZTE SoCs reset driver");
++MODULE_AUTHOR("Jun Nie <jun.nie@linaro.org>");
++MODULE_LICENSE("GPL v2");
+diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
+index b5f4006198b9..a9a56aa9c26b 100644
+--- a/drivers/s390/crypto/zcrypt_api.c
++++ b/drivers/s390/crypto/zcrypt_api.c
+@@ -218,8 +218,8 @@ static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
+ weight += atomic_read(&zq->load);
+ pref_weight += atomic_read(&pref_zq->load);
+ if (weight == pref_weight)
+- return &zq->queue->total_request_count >
+- &pref_zq->queue->total_request_count;
++ return zq->queue->total_request_count >
++ pref_zq->queue->total_request_count;
+ return weight > pref_weight;
+ }
+
+diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
+index af3e4d3f9735..7173ae53c526 100644
+--- a/drivers/scsi/aacraid/aachba.c
++++ b/drivers/scsi/aacraid/aachba.c
+@@ -913,8 +913,15 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
+ memset(str, ' ', sizeof(*str));
+
+ if (sup_adap_info->adapter_type_text[0]) {
+- char *cp = sup_adap_info->adapter_type_text;
+ int c;
++ char *cp;
++ char *cname = kmemdup(sup_adap_info->adapter_type_text,
++ sizeof(sup_adap_info->adapter_type_text),
++ GFP_ATOMIC);
++ if (!cname)
++ return;
++
++ cp = cname;
+ if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C'))
+ inqstrcpy("SMC", str->vid);
+ else {
+@@ -923,7 +930,7 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
+ ++cp;
+ c = *cp;
+ *cp = '\0';
+- inqstrcpy(sup_adap_info->adapter_type_text, str->vid);
++ inqstrcpy(cname, str->vid);
+ *cp = c;
+ while (*cp && *cp != ' ')
+ ++cp;
+@@ -937,8 +944,8 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
+ cp[sizeof(str->pid)] = '\0';
+ }
+ inqstrcpy (cp, str->pid);
+- if (c)
+- cp[sizeof(str->pid)] = c;
++
++ kfree(cname);
+ } else {
+ struct aac_driver_ident *mp = aac_get_driver_ident(dev->cardtype);
+
+diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
+index 525a652dab48..c0a4fcb7fd0a 100644
+--- a/drivers/scsi/aacraid/commsup.c
++++ b/drivers/scsi/aacraid/commsup.c
+@@ -1583,6 +1583,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
+ * will ensure that i/o is queisced and the card is flushed in that
+ * case.
+ */
++ aac_free_irq(aac);
+ aac_fib_map_free(aac);
+ dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr,
+ aac->comm_phys);
+@@ -1590,7 +1591,6 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
+ aac->comm_phys = 0;
+ kfree(aac->queues);
+ aac->queues = NULL;
+- aac_free_irq(aac);
+ kfree(aac->fsa_dev);
+ aac->fsa_dev = NULL;
+
+@@ -1672,14 +1672,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
+ out:
+ aac->in_reset = 0;
+ scsi_unblock_requests(host);
+- /*
+- * Issue bus rescan to catch any configuration that might have
+- * occurred
+- */
+- if (!retval) {
+- dev_info(&aac->pdev->dev, "Issuing bus rescan\n");
+- scsi_scan_host(host);
+- }
++
+ if (jafo) {
+ spin_lock_irq(host->host_lock);
+ }
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index 794a4600e952..d344fef01f1d 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -6555,12 +6555,15 @@ static int ufshcd_config_vreg(struct device *dev,
+ struct ufs_vreg *vreg, bool on)
+ {
+ int ret = 0;
+- struct regulator *reg = vreg->reg;
+- const char *name = vreg->name;
++ struct regulator *reg;
++ const char *name;
+ int min_uV, uA_load;
+
+ BUG_ON(!vreg);
+
++ reg = vreg->reg;
++ name = vreg->name;
++
+ if (regulator_count_voltages(reg) > 0) {
+ min_uV = on ? vreg->min_uV : 0;
+ ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
+diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
+index babb15f07995..d51ca243a028 100644
+--- a/drivers/spi/spi-imx.c
++++ b/drivers/spi/spi-imx.c
+@@ -1496,12 +1496,23 @@ static int spi_imx_remove(struct platform_device *pdev)
+ {
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
++ int ret;
+
+ spi_bitbang_stop(&spi_imx->bitbang);
+
++ ret = clk_enable(spi_imx->clk_per);
++ if (ret)
++ return ret;
++
++ ret = clk_enable(spi_imx->clk_ipg);
++ if (ret) {
++ clk_disable(spi_imx->clk_per);
++ return ret;
++ }
++
+ writel(0, spi_imx->base + MXC_CSPICTRL);
+- clk_unprepare(spi_imx->clk_ipg);
+- clk_unprepare(spi_imx->clk_per);
++ clk_disable_unprepare(spi_imx->clk_ipg);
++ clk_disable_unprepare(spi_imx->clk_per);
+ spi_imx_sdma_exit(spi_imx);
+ spi_master_put(master);
+
+diff --git a/drivers/staging/ccree/ssi_cipher.c b/drivers/staging/ccree/ssi_cipher.c
+index 8d31a93fd8b7..087a622f20b2 100644
+--- a/drivers/staging/ccree/ssi_cipher.c
++++ b/drivers/staging/ccree/ssi_cipher.c
+@@ -904,6 +904,7 @@ static int ssi_ablkcipher_decrypt(struct ablkcipher_request *req)
+ scatterwalk_map_and_copy(req_ctx->backup_info, req->src,
+ (req->nbytes - ivsize), ivsize, 0);
+ req_ctx->is_giv = false;
++ req_ctx->backup_info = NULL;
+
+ return ssi_blkcipher_process(tfm, req_ctx, req->dst, req->src, req->nbytes, req->info, ivsize, (void *)req, DRV_CRYPTO_DIRECTION_DECRYPT);
+ }
+diff --git a/drivers/staging/ccree/ssi_driver.c b/drivers/staging/ccree/ssi_driver.c
+index 9c6f1200c130..eeb995307951 100644
+--- a/drivers/staging/ccree/ssi_driver.c
++++ b/drivers/staging/ccree/ssi_driver.c
+@@ -141,7 +141,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
+ irr &= ~SSI_COMP_IRQ_MASK;
+ complete_request(drvdata);
+ }
+-#ifdef CC_SUPPORT_FIPS
++#ifdef CONFIG_CRYPTO_FIPS
+ /* TEE FIPS interrupt */
+ if (likely((irr & SSI_GPR0_IRQ_MASK) != 0)) {
+ /* Mask interrupt - will be unmasked in Deferred service handler */
+diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+index 64763aacda57..284cdd44a2ee 100644
+--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
++++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+@@ -825,14 +825,15 @@ struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cm
+ return conn;
+
+ failed_2:
+- kiblnd_destroy_conn(conn, true);
++ kiblnd_destroy_conn(conn);
++ LIBCFS_FREE(conn, sizeof(*conn));
+ failed_1:
+ LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
+ failed_0:
+ return NULL;
+ }
+
+-void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn)
++void kiblnd_destroy_conn(struct kib_conn *conn)
+ {
+ struct rdma_cm_id *cmid = conn->ibc_cmid;
+ struct kib_peer *peer = conn->ibc_peer;
+@@ -895,8 +896,6 @@ void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn)
+ rdma_destroy_id(cmid);
+ atomic_dec(&net->ibn_nconns);
+ }
+-
+- LIBCFS_FREE(conn, sizeof(*conn));
+ }
+
+ int kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why)
+diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
+index a1e994a1cc84..98a5e2c21a83 100644
+--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
++++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
+@@ -1015,7 +1015,7 @@ int kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why);
+ struct kib_conn *kiblnd_create_conn(struct kib_peer *peer,
+ struct rdma_cm_id *cmid,
+ int state, int version);
+-void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn);
++void kiblnd_destroy_conn(struct kib_conn *conn);
+ void kiblnd_close_conn(struct kib_conn *conn, int error);
+ void kiblnd_close_conn_locked(struct kib_conn *conn, int error);
+
+diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+index 8fc191d99927..29e10021b906 100644
+--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
++++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+@@ -3313,11 +3313,13 @@ kiblnd_connd(void *arg)
+ spin_unlock_irqrestore(lock, flags);
+ dropped_lock = 1;
+
+- kiblnd_destroy_conn(conn, !peer);
++ kiblnd_destroy_conn(conn);
+
+ spin_lock_irqsave(lock, flags);
+- if (!peer)
++ if (!peer) {
++ kfree(conn);
+ continue;
++ }
+
+ conn->ibc_peer = peer;
+ if (peer->ibp_reconnected < KIB_RECONN_HIGH_RACE)
+diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+index c0664dc80bf2..446310775e90 100644
+--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
++++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+@@ -1395,19 +1395,13 @@ static int rtw_wx_get_essid(struct net_device *dev,
+ if ((check_fwstate(pmlmepriv, _FW_LINKED)) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))) {
+ len = pcur_bss->Ssid.SsidLength;
+-
+- wrqu->essid.length = len;
+-
+ memcpy(extra, pcur_bss->Ssid.Ssid, len);
+-
+- wrqu->essid.flags = 1;
+ } else {
+- ret = -1;
+- goto exit;
++ len = 0;
++ *extra = 0;
+ }
+-
+-exit:
+-
++ wrqu->essid.length = len;
++ wrqu->essid.flags = 1;
+
+ return ret;
+ }
+diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
+index 1222c005fb98..951680640ad5 100644
+--- a/drivers/tty/serial/8250/8250_of.c
++++ b/drivers/tty/serial/8250/8250_of.c
+@@ -141,8 +141,11 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
+ }
+
+ info->rst = devm_reset_control_get_optional_shared(&ofdev->dev, NULL);
+- if (IS_ERR(info->rst))
++ if (IS_ERR(info->rst)) {
++ ret = PTR_ERR(info->rst);
+ goto err_dispose;
++ }
++
+ ret = reset_control_deassert(info->rst);
+ if (ret)
+ goto err_dispose;
+diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c
+index 8a10b10e27aa..c206f173f912 100644
+--- a/drivers/tty/serial/8250/8250_uniphier.c
++++ b/drivers/tty/serial/8250/8250_uniphier.c
+@@ -259,12 +259,13 @@ static int uniphier_uart_probe(struct platform_device *pdev)
+ up.dl_read = uniphier_serial_dl_read;
+ up.dl_write = uniphier_serial_dl_write;
+
+- priv->line = serial8250_register_8250_port(&up);
+- if (priv->line < 0) {
++ ret = serial8250_register_8250_port(&up);
++ if (ret < 0) {
+ dev_err(dev, "failed to register 8250 port\n");
+ clk_disable_unprepare(priv->clk);
+ return ret;
+ }
++ priv->line = ret;
+
+ platform_set_drvdata(pdev, priv);
+
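The uniphier fix stores the result of serial8250_register_8250_port() in a signed local and assigns it to priv->line only once it is known to be non-negative; testing an unsigned field for "< 0" can never fire. A compact sketch, with register_port() as a hypothetical helper returning a line number or a negative errno:

	#include <stdio.h>

	struct priv { unsigned int line; };

	static int register_port(int fail) { return fail ? -19 : 3; }

	static int probe(struct priv *priv, int fail)
	{
		int ret = register_port(fail);

		/* Test the signed return first; "priv->line < 0" would never
		 * be true once the value sits in an unsigned field. */
		if (ret < 0)
			return ret;

		priv->line = ret;
		return 0;
	}

	int main(void)
	{
		struct priv p = { 0 };
		int ok  = probe(&p, 0);
		int err = probe(&p, 1);

		printf("ok=%d err=%d line=%u\n", ok, err, p.line);
		return 0;
	}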
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index 3657d745e90f..521500c575c8 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -2275,12 +2275,14 @@ static void serial_imx_enable_wakeup(struct imx_port *sport, bool on)
+ val &= ~UCR3_AWAKEN;
+ writel(val, sport->port.membase + UCR3);
+
+- val = readl(sport->port.membase + UCR1);
+- if (on)
+- val |= UCR1_RTSDEN;
+- else
+- val &= ~UCR1_RTSDEN;
+- writel(val, sport->port.membase + UCR1);
++ if (sport->have_rtscts) {
++ val = readl(sport->port.membase + UCR1);
++ if (on)
++ val |= UCR1_RTSDEN;
++ else
++ val &= ~UCR1_RTSDEN;
++ writel(val, sport->port.membase + UCR1);
++ }
+ }
+
+ static int imx_serial_port_suspend_noirq(struct device *dev)
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 94cccb6efa32..7892d0be8af9 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -1322,6 +1322,9 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
+ "%s: %s driver does not set tty->port. This will crash the kernel later. Fix the driver!\n",
+ __func__, tty->driver->name);
+
++ retval = tty_ldisc_lock(tty, 5 * HZ);
++ if (retval)
++ goto err_release_lock;
+ tty->port->itty = tty;
+
+ /*
+@@ -1332,6 +1335,7 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
+ retval = tty_ldisc_setup(tty, tty->link);
+ if (retval)
+ goto err_release_tty;
++ tty_ldisc_unlock(tty);
+ /* Return the tty locked so that it cannot vanish under the caller */
+ return tty;
+
+@@ -1344,9 +1348,11 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
+
+ /* call the tty release_tty routine to clean out this slot */
+ err_release_tty:
+- tty_unlock(tty);
++ tty_ldisc_unlock(tty);
+ tty_info_ratelimited(tty, "ldisc open failed (%d), clearing slot %d\n",
+ retval, idx);
++err_release_lock:
++ tty_unlock(tty);
+ release_tty(tty, idx);
+ return ERR_PTR(retval);
+ }
+diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
+index 84a8ac2a779f..7c895684c3ef 100644
+--- a/drivers/tty/tty_ldisc.c
++++ b/drivers/tty/tty_ldisc.c
+@@ -336,7 +336,7 @@ static inline void __tty_ldisc_unlock(struct tty_struct *tty)
+ ldsem_up_write(&tty->ldisc_sem);
+ }
+
+-static int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
++int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
+ {
+ int ret;
+
+@@ -347,7 +347,7 @@ static int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
+ return 0;
+ }
+
+-static void tty_ldisc_unlock(struct tty_struct *tty)
++void tty_ldisc_unlock(struct tty_struct *tty)
+ {
+ clear_bit(TTY_LDISC_HALTED, &tty->flags);
+ __tty_ldisc_unlock(tty);
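tty_init_dev() now takes the line-discipline lock with a five-second timeout before publishing tty->port->itty, and the lock/unlock helpers lose their static so their prototypes can move into include/linux/tty.h. A userspace analogue of that take-with-timeout-or-unwind step, using POSIX pthread_mutex_timedlock() (link with -lpthread):

	#include <errno.h>
	#include <pthread.h>
	#include <stdio.h>
	#include <time.h>

	/* Analogue of tty_ldisc_lock(tty, 5 * HZ): take a lock with a
	 * timeout and turn expiry into an error the caller can unwind. */
	static int lock_with_timeout(pthread_mutex_t *m, int secs)
	{
		struct timespec ts;

		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_sec += secs;
		return pthread_mutex_timedlock(m, &ts) ? -EBUSY : 0;
	}

	int main(void)
	{
		pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

		if (lock_with_timeout(&m, 5))
			return 1;	/* would be "goto err_release_lock" */
		puts("ldisc setup would run here");
		pthread_mutex_unlock(&m);
		return 0;
	}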
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 18c923a4c16e..4149a965516e 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -438,7 +438,7 @@ static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags)
+
+ res = usb_submit_urb(acm->read_urbs[index], mem_flags);
+ if (res) {
+- if (res != -EPERM) {
++ if (res != -EPERM && res != -ENODEV) {
+ dev_err(&acm->data->dev,
+ "urb %d failed submission with %d\n",
+ index, res);
+@@ -1765,6 +1765,9 @@ static const struct usb_device_id acm_ids[] = {
+ { USB_DEVICE(0x0ace, 0x1611), /* ZyDAS 56K USB MODEM - new version */
+ .driver_info = SINGLE_RX_URB, /* firmware bug */
+ },
++ { USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */
++ .driver_info = SINGLE_RX_URB,
++ },
+ { USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */
+ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+ },
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index 5d061b3d8224..ed9346f0b000 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -150,7 +150,6 @@ int config_ep_by_speed(struct usb_gadget *g,
+ struct usb_function *f,
+ struct usb_ep *_ep)
+ {
+- struct usb_composite_dev *cdev = get_gadget_data(g);
+ struct usb_endpoint_descriptor *chosen_desc = NULL;
+ struct usb_descriptor_header **speed_desc = NULL;
+
+@@ -229,8 +228,12 @@ int config_ep_by_speed(struct usb_gadget *g,
+ _ep->maxburst = comp_desc->bMaxBurst + 1;
+ break;
+ default:
+- if (comp_desc->bMaxBurst != 0)
++ if (comp_desc->bMaxBurst != 0) {
++ struct usb_composite_dev *cdev;
++
++ cdev = get_gadget_data(g);
+ ERROR(cdev, "ep0 bMaxBurst must be 0\n");
++ }
+ _ep->maxburst = 1;
+ break;
+ }
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 876cdbec1307..c0491dd73f53 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -3704,7 +3704,8 @@ static void ffs_closed(struct ffs_data *ffs)
+ ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
+ ffs_dev_unlock();
+
+- unregister_gadget_item(ci);
++ if (test_bit(FFS_FL_BOUND, &ffs->flags))
++ unregister_gadget_item(ci);
+ return;
+ done:
+ ffs_dev_unlock();
+diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
+index 284bd1a7b570..794bb4958383 100644
+--- a/drivers/usb/gadget/udc/core.c
++++ b/drivers/usb/gadget/udc/core.c
+@@ -923,7 +923,7 @@ int usb_gadget_ep_match_desc(struct usb_gadget *gadget,
+ return 0;
+
+ /* "high bandwidth" works only at high speed */
+- if (!gadget_is_dualspeed(gadget) && usb_endpoint_maxp(desc) & (3<<11))
++ if (!gadget_is_dualspeed(gadget) && usb_endpoint_maxp_mult(desc) > 1)
+ return 0;
+
+ switch (type) {
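The gadget core stops open-coding the 3<<11 mask: bits 11-12 of wMaxPacketSize encode additional transactions per microframe, so usb_endpoint_maxp_mult() yields a 1-3 multiplier and "high bandwidth" is simply a multiplier above 1. A standalone decode of that bitfield (the helpers below mirror the arithmetic, not the kernel API):

	#include <stdio.h>

	/* wMaxPacketSize layout for periodic endpoints: bits 0-10 are the
	 * packet size, bits 11-12 are (transactions per microframe - 1). */
	static unsigned int maxp(unsigned int w)      { return w & 0x7ff; }
	static unsigned int maxp_mult(unsigned int w) { return ((w >> 11) & 3) + 1; }

	int main(void)
	{
		unsigned int w = (2u << 11) | 1024;	/* 3 x 1024 per uframe */

		printf("maxp=%u mult=%u high-bw=%d\n",
		       maxp(w), maxp_mult(w), maxp_mult(w) > 1);
		return 0;
	}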
+diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
+index a8d5f2e4878d..c66b93664d54 100644
+--- a/drivers/usb/serial/Kconfig
++++ b/drivers/usb/serial/Kconfig
+@@ -63,6 +63,7 @@ config USB_SERIAL_SIMPLE
+ - Google USB serial devices
+ - HP4x calculators
+ - a number of Motorola phones
++ - Motorola Tetra devices
+ - Novatel Wireless GPS receivers
+ - Siemens USB/MPI adapter.
+ - ViVOtech ViVOpay USB device.
+diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
+index bdf8bd814a9a..01f3ac7769f3 100644
+--- a/drivers/usb/serial/io_edgeport.c
++++ b/drivers/usb/serial/io_edgeport.c
+@@ -2286,7 +2286,6 @@ static int write_cmd_usb(struct edgeport_port *edge_port,
+ /* something went wrong */
+ dev_err(dev, "%s - usb_submit_urb(write command) failed, status = %d\n",
+ __func__, status);
+- usb_kill_urb(urb);
+ usb_free_urb(urb);
+ atomic_dec(&CmdUrbs);
+ return status;
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index a9400458ccea..dcf78a498927 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -383,6 +383,9 @@ static void option_instat_callback(struct urb *urb);
+ #define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
+ #define FOUR_G_SYSTEMS_PRODUCT_W100 0x9b01
+
++/* Fujisoft products */
++#define FUJISOFT_PRODUCT_FS040U 0x9b02
++
+ /* iBall 3.5G connect wireless modem */
+ #define IBALL_3_5G_CONNECT 0x9605
+
+@@ -1897,6 +1900,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
+ .driver_info = (kernel_ulong_t)&four_g_w100_blacklist
+ },
++ { USB_DEVICE(LONGCHEER_VENDOR_ID, FUJISOFT_PRODUCT_FS040U),
++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
+ { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index a585b477415d..34c5a75f98a7 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -41,6 +41,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ2) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_DCU11) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ3) },
++ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_CHILITAG) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_PHAROS) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ALDIGA) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) },
+diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
+index 3b5a15d1dc0d..123289085ee2 100644
+--- a/drivers/usb/serial/pl2303.h
++++ b/drivers/usb/serial/pl2303.h
+@@ -17,6 +17,7 @@
+ #define PL2303_PRODUCT_ID_DCU11 0x1234
+ #define PL2303_PRODUCT_ID_PHAROS 0xaaa0
+ #define PL2303_PRODUCT_ID_RSAQ3 0xaaa2
++#define PL2303_PRODUCT_ID_CHILITAG 0xaaa8
+ #define PL2303_PRODUCT_ID_ALDIGA 0x0611
+ #define PL2303_PRODUCT_ID_MMX 0x0612
+ #define PL2303_PRODUCT_ID_GPRS 0x0609
+diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
+index e98b6e57b703..6aa7ff2c1cf7 100644
+--- a/drivers/usb/serial/usb-serial-simple.c
++++ b/drivers/usb/serial/usb-serial-simple.c
+@@ -80,6 +80,11 @@ DEVICE(vivopay, VIVOPAY_IDS);
+ { USB_DEVICE(0x22b8, 0x2c64) } /* Motorola V950 phone */
+ DEVICE(moto_modem, MOTO_IDS);
+
++/* Motorola Tetra driver */
++#define MOTOROLA_TETRA_IDS() \
++ { USB_DEVICE(0x0cad, 0x9011) } /* Motorola Solutions TETRA PEI */
++DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
++
+ /* Novatel Wireless GPS driver */
+ #define NOVATEL_IDS() \
+ { USB_DEVICE(0x09d7, 0x0100) } /* NovAtel FlexPack GPS */
+@@ -110,6 +115,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
+ &google_device,
+ &vivopay_device,
+ &moto_modem_device,
++ &motorola_tetra_device,
+ &novatel_gps_device,
+ &hp4x_device,
+ &suunto_device,
+@@ -125,6 +131,7 @@ static const struct usb_device_id id_table[] = {
+ GOOGLE_IDS(),
+ VIVOPAY_IDS(),
+ MOTO_IDS(),
++ MOTOROLA_TETRA_IDS(),
+ NOVATEL_IDS(),
+ HP4X_IDS(),
+ SUUNTO_IDS(),
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index 63cf981ed81c..0bc8543e96b1 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -1076,20 +1076,19 @@ static int uas_post_reset(struct usb_interface *intf)
+ return 0;
+
+ err = uas_configure_endpoints(devinfo);
+- if (err) {
++ if (err && err != -ENODEV)
+ shost_printk(KERN_ERR, shost,
+ "%s: alloc streams error %d after reset",
+ __func__, err);
+- return 1;
+- }
+
++ /* we must unblock the host in every case lest we deadlock */
+ spin_lock_irqsave(shost->host_lock, flags);
+ scsi_report_bus_reset(shost, 0);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ scsi_unblock_requests(shost);
+
+- return 0;
++ return err ? 1 : 0;
+ }
+
+ static int uas_suspend(struct usb_interface *intf, pm_message_t message)
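The reshuffled uas_post_reset() makes scsi_unblock_requests() unconditional: the old early return on a stream-allocation failure left the host blocked forever, hence the "lest we deadlock" comment. The skeleton below (configure() is a stand-in for uas_configure_endpoints()) shows the shape — the error only selects the log message and the return value, never the unblock:

	#include <stdio.h>

	static int configure(int fail) { return fail ? -19 /* like -ENODEV */ : 0; }
	static void unblock(void) { puts("host unblocked"); }

	static int post_reset(int fail)
	{
		int err = configure(fail);

		if (err)
			fprintf(stderr, "alloc streams error %d after reset\n", err);

		unblock();	/* every path, else the host stays blocked */
		return err ? 1 : 0;
	}

	int main(void)
	{
		printf("ok=%d fail=%d\n", post_reset(0), post_reset(1));
		return 0;
	}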
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index aafcc785f840..d564a7049d7f 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -2056,6 +2056,8 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ len = (u64)end - (u64)start + 1;
+ trace_btrfs_sync_file(file, datasync);
+
++ btrfs_init_log_ctx(&ctx, inode);
++
+ /*
+ * We write the dirty pages in the range and wait until they complete
+ * out of the ->i_mutex. If so, we can flush the dirty pages by
+@@ -2202,8 +2204,6 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ }
+ trans->sync = true;
+
+- btrfs_init_log_ctx(&ctx, inode);
+-
+ ret = btrfs_log_dentry_safe(trans, root, dentry, start, end, &ctx);
+ if (ret < 0) {
+ /* Fallthrough and commit/free transaction. */
+@@ -2261,6 +2261,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ ret = btrfs_end_transaction(trans);
+ }
+ out:
++ ASSERT(list_empty(&ctx.list));
+ err = file_check_and_advance_wb_err(file);
+ if (!ret)
+ ret = err;
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index cdc9f4015ec3..4426d1c73e50 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -1264,7 +1264,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
+ /* Lock all pages first so we can lock the extent safely. */
+ ret = io_ctl_prepare_pages(io_ctl, inode, 0);
+ if (ret)
+- goto out;
++ goto out_unlock;
+
+ lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
+ &cached_state);
+@@ -1358,6 +1358,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
+ out_nospc:
+ cleanup_write_cache_enospc(inode, io_ctl, &cached_state);
+
++out_unlock:
+ if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
+ up_write(&block_group->data_rwsem);
+
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index d94e3f68b9b1..c71afd424900 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -5500,6 +5500,14 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
+ goto out_err;
+
+ btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
++ if (location->type != BTRFS_INODE_ITEM_KEY &&
++ location->type != BTRFS_ROOT_ITEM_KEY) {
++ btrfs_warn(root->fs_info,
++"%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
++ __func__, name, btrfs_ino(BTRFS_I(dir)),
++ location->objectid, location->type, location->offset);
++ goto out_err;
++ }
+ out:
+ btrfs_free_path(path);
+ return ret;
+@@ -5816,8 +5824,6 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
+ return inode;
+ }
+
+- BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
+-
+ index = srcu_read_lock(&fs_info->subvol_srcu);
+ ret = fixup_tree_root_location(fs_info, dir, dentry,
+ &location, &sub_root);
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index 8fd195cfe81b..2c35717a3470 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -3527,7 +3527,40 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx,
+ }
+
+ /*
+- * Check if ino ino1 is an ancestor of inode ino2 in the given root.
++ * Check if inode ino2, or any of its ancestors, is inode ino1.
++ * Return 1 if true, 0 if false and < 0 on error.
++ */
++static int check_ino_in_path(struct btrfs_root *root,
++ const u64 ino1,
++ const u64 ino1_gen,
++ const u64 ino2,
++ const u64 ino2_gen,
++ struct fs_path *fs_path)
++{
++ u64 ino = ino2;
++
++ if (ino1 == ino2)
++ return ino1_gen == ino2_gen;
++
++ while (ino > BTRFS_FIRST_FREE_OBJECTID) {
++ u64 parent;
++ u64 parent_gen;
++ int ret;
++
++ fs_path_reset(fs_path);
++ ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
++ if (ret < 0)
++ return ret;
++ if (parent == ino1)
++ return parent_gen == ino1_gen;
++ ino = parent;
++ }
++ return 0;
++}
++
++/*
++ * Check if inode ino1 is an ancestor of inode ino2 in the given root for any
++ * possible path (in case ino2 is not a directory and has multiple hard links).
+ * Return 1 if true, 0 if false and < 0 on error.
+ */
+ static int is_ancestor(struct btrfs_root *root,
+@@ -3536,36 +3569,91 @@ static int is_ancestor(struct btrfs_root *root,
+ const u64 ino2,
+ struct fs_path *fs_path)
+ {
+- u64 ino = ino2;
+- bool free_path = false;
++ bool free_fs_path = false;
+ int ret = 0;
++ struct btrfs_path *path = NULL;
++ struct btrfs_key key;
+
+ if (!fs_path) {
+ fs_path = fs_path_alloc();
+ if (!fs_path)
+ return -ENOMEM;
+- free_path = true;
++ free_fs_path = true;
+ }
+
+- while (ino > BTRFS_FIRST_FREE_OBJECTID) {
+- u64 parent;
+- u64 parent_gen;
++ path = alloc_path_for_send();
++ if (!path) {
++ ret = -ENOMEM;
++ goto out;
++ }
+
+- fs_path_reset(fs_path);
+- ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
+- if (ret < 0) {
+- if (ret == -ENOENT && ino == ino2)
+- ret = 0;
+- goto out;
++ key.objectid = ino2;
++ key.type = BTRFS_INODE_REF_KEY;
++ key.offset = 0;
++
++ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
++ if (ret < 0)
++ goto out;
++
++ while (true) {
++ struct extent_buffer *leaf = path->nodes[0];
++ int slot = path->slots[0];
++ u32 cur_offset = 0;
++ u32 item_size;
++
++ if (slot >= btrfs_header_nritems(leaf)) {
++ ret = btrfs_next_leaf(root, path);
++ if (ret < 0)
++ goto out;
++ if (ret > 0)
++ break;
++ continue;
+ }
+- if (parent == ino1) {
+- ret = parent_gen == ino1_gen ? 1 : 0;
+- goto out;
++
++ btrfs_item_key_to_cpu(leaf, &key, slot);
++ if (key.objectid != ino2)
++ break;
++ if (key.type != BTRFS_INODE_REF_KEY &&
++ key.type != BTRFS_INODE_EXTREF_KEY)
++ break;
++
++ item_size = btrfs_item_size_nr(leaf, slot);
++ while (cur_offset < item_size) {
++ u64 parent;
++ u64 parent_gen;
++
++ if (key.type == BTRFS_INODE_EXTREF_KEY) {
++ unsigned long ptr;
++ struct btrfs_inode_extref *extref;
++
++ ptr = btrfs_item_ptr_offset(leaf, slot);
++ extref = (struct btrfs_inode_extref *)
++ (ptr + cur_offset);
++ parent = btrfs_inode_extref_parent(leaf,
++ extref);
++ cur_offset += sizeof(*extref);
++ cur_offset += btrfs_inode_extref_name_len(leaf,
++ extref);
++ } else {
++ parent = key.offset;
++ cur_offset = item_size;
++ }
++
++ ret = get_inode_info(root, parent, NULL, &parent_gen,
++ NULL, NULL, NULL, NULL);
++ if (ret < 0)
++ goto out;
++ ret = check_ino_in_path(root, ino1, ino1_gen,
++ parent, parent_gen, fs_path);
++ if (ret)
++ goto out;
+ }
+- ino = parent;
++ path->slots[0]++;
+ }
++ ret = 0;
+ out:
+- if (free_path)
++ btrfs_free_path(path);
++ if (free_fs_path)
+ fs_path_free(fs_path);
+ return ret;
+ }
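The rewritten is_ancestor() walks every INODE_REF/INODE_EXTREF item because a non-directory with hard links has several parent chains, and the old loop followed only the first one. A toy model of why all chains must be tried (ignoring generations, which the real code also compares):

	#include <stdio.h>

	#define NINODES 6
	static int parents[NINODES][2] = {
		[2] = { 1, -1 },	/* dir 2 under dir 1 */
		[3] = { 2, -1 },	/* dir 3 under dir 2 */
		[4] = { 3,  5 },	/* file 4 hard-linked in dirs 3 and 5 */
		[5] = { 1, -1 },
	};

	/* Try every parent chain of ino2, not just the first reference. */
	static int is_ancestor(int ino1, int ino2)
	{
		if (ino1 == ino2)
			return 1;
		for (int i = 0; i < 2; i++)
			if (parents[ino2][i] > 0 &&
			    is_ancestor(ino1, parents[ino2][i]))
				return 1;
		return 0;
	}

	int main(void)
	{
		/* dir 5 is an ancestor of file 4 only via the second link */
		printf("%d %d\n", is_ancestor(5, 4), is_ancestor(3, 5));
		return 0;
	}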
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index c800d067fcbf..d3002842d7f6 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -4100,7 +4100,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
+
+ if (ordered_io_err) {
+ ctx->io_err = -EIO;
+- return 0;
++ return ctx->io_err;
+ }
+
+ btrfs_init_map_token(&token);
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 0c11121a8ace..4006b2a1233d 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -1765,20 +1765,24 @@ static int btrfs_rm_dev_item(struct btrfs_fs_info *fs_info,
+ key.offset = device->devid;
+
+ ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+- if (ret < 0)
+- goto out;
+-
+- if (ret > 0) {
+- ret = -ENOENT;
++ if (ret) {
++ if (ret > 0)
++ ret = -ENOENT;
++ btrfs_abort_transaction(trans, ret);
++ btrfs_end_transaction(trans);
+ goto out;
+ }
+
+ ret = btrfs_del_item(trans, root, path);
+- if (ret)
+- goto out;
++ if (ret) {
++ btrfs_abort_transaction(trans, ret);
++ btrfs_end_transaction(trans);
++ }
++
+ out:
+ btrfs_free_path(path);
+- btrfs_commit_transaction(trans);
++ if (!ret)
++ ret = btrfs_commit_transaction(trans);
+ return ret;
+ }
+
+diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
+index 45e96549ebd2..809cbccbad28 100644
+--- a/fs/lockd/svc.c
++++ b/fs/lockd/svc.c
+@@ -57,6 +57,9 @@ static struct task_struct *nlmsvc_task;
+ static struct svc_rqst *nlmsvc_rqst;
+ unsigned long nlmsvc_timeout;
+
++atomic_t nlm_ntf_refcnt = ATOMIC_INIT(0);
++DECLARE_WAIT_QUEUE_HEAD(nlm_ntf_wq);
++
+ unsigned int lockd_net_id;
+
+ /*
+@@ -292,7 +295,8 @@ static int lockd_inetaddr_event(struct notifier_block *this,
+ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+ struct sockaddr_in sin;
+
+- if (event != NETDEV_DOWN)
++ if ((event != NETDEV_DOWN) ||
++ !atomic_inc_not_zero(&nlm_ntf_refcnt))
+ goto out;
+
+ if (nlmsvc_rqst) {
+@@ -303,6 +307,8 @@ static int lockd_inetaddr_event(struct notifier_block *this,
+ svc_age_temp_xprts_now(nlmsvc_rqst->rq_server,
+ (struct sockaddr *)&sin);
+ }
++ atomic_dec(&nlm_ntf_refcnt);
++ wake_up(&nlm_ntf_wq);
+
+ out:
+ return NOTIFY_DONE;
+@@ -319,7 +325,8 @@ static int lockd_inet6addr_event(struct notifier_block *this,
+ struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
+ struct sockaddr_in6 sin6;
+
+- if (event != NETDEV_DOWN)
++ if ((event != NETDEV_DOWN) ||
++ !atomic_inc_not_zero(&nlm_ntf_refcnt))
+ goto out;
+
+ if (nlmsvc_rqst) {
+@@ -331,6 +338,8 @@ static int lockd_inet6addr_event(struct notifier_block *this,
+ svc_age_temp_xprts_now(nlmsvc_rqst->rq_server,
+ (struct sockaddr *)&sin6);
+ }
++ atomic_dec(&nlm_ntf_refcnt);
++ wake_up(&nlm_ntf_wq);
+
+ out:
+ return NOTIFY_DONE;
+@@ -347,10 +356,12 @@ static void lockd_unregister_notifiers(void)
+ #if IS_ENABLED(CONFIG_IPV6)
+ unregister_inet6addr_notifier(&lockd_inet6addr_notifier);
+ #endif
++ wait_event(nlm_ntf_wq, atomic_read(&nlm_ntf_refcnt) == 0);
+ }
+
+ static void lockd_svc_exit_thread(void)
+ {
++ atomic_dec(&nlm_ntf_refcnt);
+ lockd_unregister_notifiers();
+ svc_exit_thread(nlmsvc_rqst);
+ }
+@@ -375,6 +386,7 @@ static int lockd_start_svc(struct svc_serv *serv)
+ goto out_rqst;
+ }
+
++ atomic_inc(&nlm_ntf_refcnt);
+ svc_sock_update_bufs(serv);
+ serv->sv_maxconn = nlm_max_connections;
+
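lockd's address notifiers can now run concurrently with shutdown, so each one takes a reference with atomic_inc_not_zero() — which fails once the count has already hit zero — and teardown waits on nlm_ntf_wq until every in-flight callback has dropped its reference. A userspace analogue of the scheme with C11 atomics and a condition variable (compile with -lpthread):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_int refcnt = 1;	/* the "service is up" reference */
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  wq   = PTHREAD_COND_INITIALIZER;

	static bool get_ref(void)	/* atomic_inc_not_zero() */
	{
		int v = atomic_load(&refcnt);

		while (v != 0)
			if (atomic_compare_exchange_weak(&refcnt, &v, v + 1))
				return true;
		return false;
	}

	static void put_ref(void)
	{
		pthread_mutex_lock(&lock);
		if (atomic_fetch_sub(&refcnt, 1) == 1)
			pthread_cond_broadcast(&wq);	/* wake_up() */
		pthread_mutex_unlock(&lock);
	}

	static void notifier(void)
	{
		if (!get_ref())
			return;		/* service already going away */
		puts("touch nlmsvc_rqst safely");
		put_ref();
	}

	int main(void)
	{
		notifier();
		put_ref();		/* exit_thread()'s final drop */
		pthread_mutex_lock(&lock);
		while (atomic_load(&refcnt) != 0)
			pthread_cond_wait(&wq, &lock);	/* wait_event() */
		pthread_mutex_unlock(&lock);
		puts("safe to free service state");
		return 0;
	}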
+diff --git a/fs/namespace.c b/fs/namespace.c
+index d18deb4c410b..adae9ffce91d 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -2826,6 +2826,7 @@ long do_mount(const char *dev_name, const char __user *dir_name,
+ SB_DIRSYNC |
+ SB_SILENT |
+ SB_POSIXACL |
++ SB_LAZYTIME |
+ SB_I_VERSION);
+
+ if (flags & MS_REMOUNT)
+diff --git a/fs/nfs_common/grace.c b/fs/nfs_common/grace.c
+index 420d3a0ab258..3b13fb3b0553 100644
+--- a/fs/nfs_common/grace.c
++++ b/fs/nfs_common/grace.c
+@@ -30,7 +30,11 @@ locks_start_grace(struct net *net, struct lock_manager *lm)
+ struct list_head *grace_list = net_generic(net, grace_net_id);
+
+ spin_lock(&grace_lock);
+- list_add(&lm->list, grace_list);
++ if (list_empty(&lm->list))
++ list_add(&lm->list, grace_list);
++ else
++ WARN(1, "double list_add attempt detected in net %x %s\n",
++ net->ns.inum, (net == &init_net) ? "(init_net)" : "");
+ spin_unlock(&grace_lock);
+ }
+ EXPORT_SYMBOL_GPL(locks_start_grace);
+@@ -104,7 +108,9 @@ grace_exit_net(struct net *net)
+ {
+ struct list_head *grace_list = net_generic(net, grace_net_id);
+
+- BUG_ON(!list_empty(grace_list));
++ WARN_ONCE(!list_empty(grace_list),
++ "net %x %s: grace_list is not empty\n",
++ net->ns.inum, __func__);
+ }
+
+ static struct pernet_operations grace_net_ops = {
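locks_start_grace() can now be reached twice for the same lock_manager across a restart, so it checks list_empty() — true only for an initialized, unlinked node — before linking, and WARNs instead of corrupting the list. The guard in miniature, on a hand-rolled doubly linked list:

	#include <stdio.h>

	struct node { struct node *next, *prev; };

	static void init_node(struct node *n) { n->next = n->prev = n; }
	/* list_empty() semantics: a node points at itself when unlinked. */
	static int on_a_list(const struct node *n) { return n->next != n; }

	static void add(struct node *head, struct node *n)
	{
		if (on_a_list(n)) {
			fprintf(stderr, "double list_add attempt detected\n");
			return;
		}
		n->next = head->next;
		n->prev = head;
		head->next->prev = n;
		head->next = n;
	}

	int main(void)
	{
		struct node head, n;

		init_node(&head);
		init_node(&n);
		add(&head, &n);
		add(&head, &n);		/* refused, like the WARN() path */
		return 0;
	}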
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index a439a70177a4..d89e6ccd33ba 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -63,12 +63,16 @@ static const stateid_t zero_stateid = {
+ static const stateid_t currentstateid = {
+ .si_generation = 1,
+ };
++static const stateid_t close_stateid = {
++ .si_generation = 0xffffffffU,
++};
+
+ static u64 current_sessionid = 1;
+
+ #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
+ #define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
+ #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
++#define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
+
+ /* forward declarations */
+ static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
+@@ -4866,7 +4870,8 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
+ struct nfs4_stid *s;
+ __be32 status = nfserr_bad_stateid;
+
+- if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
++ if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
++ CLOSE_STATEID(stateid))
+ return status;
+ /* Client debugging aid. */
+ if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
+@@ -4924,7 +4929,8 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
+ else if (typemask & NFS4_DELEG_STID)
+ typemask |= NFS4_REVOKED_DELEG_STID;
+
+- if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
++ if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
++ CLOSE_STATEID(stateid))
+ return nfserr_bad_stateid;
+ status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
+ if (status == nfserr_stale_clientid) {
+@@ -5177,15 +5183,9 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
+ status = nfsd4_check_seqid(cstate, sop, seqid);
+ if (status)
+ return status;
+- if (stp->st_stid.sc_type == NFS4_CLOSED_STID
+- || stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID)
+- /*
+- * "Closed" stateid's exist *only* to return
+- * nfserr_replay_me from the previous step, and
+- * revoked delegations are kept only for free_stateid.
+- */
+- return nfserr_bad_stateid;
+- mutex_lock(&stp->st_mutex);
++ status = nfsd4_lock_ol_stateid(stp);
++ if (status != nfs_ok)
++ return status;
+ status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
+ if (status == nfs_ok)
+ status = nfs4_check_fh(current_fh, &stp->st_stid);
+@@ -5411,6 +5411,11 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ nfsd4_close_open_stateid(stp);
+ mutex_unlock(&stp->st_mutex);
+
++ /* See RFC5661 section 18.2.4 */
++ if (stp->st_stid.sc_client->cl_minorversion)
++ memcpy(&close->cl_stateid, &close_stateid,
++ sizeof(close->cl_stateid));
++
+ /* put reference from nfs4_preprocess_seqid_op */
+ nfs4_put_stid(&stp->st_stid);
+ out:
+@@ -7016,6 +7021,10 @@ static int nfs4_state_create_net(struct net *net)
+ INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
+ nn->conf_name_tree = RB_ROOT;
+ nn->unconf_name_tree = RB_ROOT;
++ nn->boot_time = get_seconds();
++ nn->grace_ended = false;
++ nn->nfsd4_manager.block_opens = true;
++ INIT_LIST_HEAD(&nn->nfsd4_manager.list);
+ INIT_LIST_HEAD(&nn->client_lru);
+ INIT_LIST_HEAD(&nn->close_lru);
+ INIT_LIST_HEAD(&nn->del_recall_lru);
+@@ -7073,9 +7082,6 @@ nfs4_state_start_net(struct net *net)
+ ret = nfs4_state_create_net(net);
+ if (ret)
+ return ret;
+- nn->boot_time = get_seconds();
+- nn->grace_ended = false;
+- nn->nfsd4_manager.block_opens = true;
+ locks_start_grace(net, &nn->nfsd4_manager);
+ nfsd4_client_tracking_init(net);
+ printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index 9f78b5015f2e..4cd0c2336624 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -934,12 +934,13 @@ static int dqinit_needed(struct inode *inode, int type)
+ }
+
+ /* This routine is guarded by s_umount semaphore */
+-static void add_dquot_ref(struct super_block *sb, int type)
++static int add_dquot_ref(struct super_block *sb, int type)
+ {
+ struct inode *inode, *old_inode = NULL;
+ #ifdef CONFIG_QUOTA_DEBUG
+ int reserved = 0;
+ #endif
++ int err = 0;
+
+ spin_lock(&sb->s_inode_list_lock);
+ list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+@@ -959,7 +960,11 @@ static void add_dquot_ref(struct super_block *sb, int type)
+ reserved = 1;
+ #endif
+ iput(old_inode);
+- __dquot_initialize(inode, type);
++ err = __dquot_initialize(inode, type);
++ if (err) {
++ iput(inode);
++ goto out;
++ }
+
+ /*
+ * We hold a reference to 'inode' so it couldn't have been
+@@ -974,7 +979,7 @@ static void add_dquot_ref(struct super_block *sb, int type)
+ }
+ spin_unlock(&sb->s_inode_list_lock);
+ iput(old_inode);
+-
++out:
+ #ifdef CONFIG_QUOTA_DEBUG
+ if (reserved) {
+ quota_error(sb, "Writes happened before quota was turned on "
+@@ -982,6 +987,7 @@ static void add_dquot_ref(struct super_block *sb, int type)
+ "Please run quotacheck(8)");
+ }
+ #endif
++ return err;
+ }
+
+ /*
+@@ -2372,10 +2378,11 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
+ dqopt->flags |= dquot_state_flag(flags, type);
+ spin_unlock(&dq_state_lock);
+
+- add_dquot_ref(sb, type);
+-
+- return 0;
++ error = add_dquot_ref(sb, type);
++ if (error)
++ dquot_disable(sb, type, flags);
+
++ return error;
+ out_file_init:
+ dqopt->files[type] = NULL;
+ iput(inode);
+@@ -2978,7 +2985,8 @@ static int __init dquot_init(void)
+ pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
+ " %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));
+
+- register_shrinker(&dqcache_shrinker);
++ if (register_shrinker(&dqcache_shrinker))
++ panic("Cannot register dquot shrinker");
+
+ return 0;
+ }
+diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
+index 5464ec517702..4885c7b6e44f 100644
+--- a/fs/reiserfs/super.c
++++ b/fs/reiserfs/super.c
+@@ -2591,7 +2591,6 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
+ return err;
+ if (inode->i_size < off + len - towrite)
+ i_size_write(inode, off + len - towrite);
+- inode->i_version++;
+ inode->i_mtime = inode->i_ctime = current_time(inode);
+ mark_inode_dirty(inode);
+ return len - towrite;
+diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
+index a3eeaba156c5..b0cccf8a81a8 100644
+--- a/fs/xfs/xfs_aops.c
++++ b/fs/xfs/xfs_aops.c
+@@ -399,7 +399,7 @@ xfs_map_blocks(
+ (ip->i_df.if_flags & XFS_IFEXTENTS));
+ ASSERT(offset <= mp->m_super->s_maxbytes);
+
+- if (offset + count > mp->m_super->s_maxbytes)
++ if ((xfs_ufsize_t)offset + count > mp->m_super->s_maxbytes)
+ count = mp->m_super->s_maxbytes - offset;
+ end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
+ offset_fsb = XFS_B_TO_FSBT(mp, offset);
+@@ -1265,7 +1265,7 @@ xfs_map_trim_size(
+ if (mapping_size > size)
+ mapping_size = size;
+ if (offset < i_size_read(inode) &&
+- offset + mapping_size >= i_size_read(inode)) {
++ (xfs_ufsize_t)offset + mapping_size >= i_size_read(inode)) {
+ /* limit mapping to block that spans EOF */
+ mapping_size = roundup_64(i_size_read(inode) - offset,
+ i_blocksize(inode));
+@@ -1312,7 +1312,7 @@ xfs_get_blocks(
+ lockmode = xfs_ilock_data_map_shared(ip);
+
+ ASSERT(offset <= mp->m_super->s_maxbytes);
+- if (offset + size > mp->m_super->s_maxbytes)
++ if ((xfs_ufsize_t)offset + size > mp->m_super->s_maxbytes)
+ size = mp->m_super->s_maxbytes - offset;
+ end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
+ offset_fsb = XFS_B_TO_FSBT(mp, offset);
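These hunks cast the loff_t offset to the unsigned 64-bit xfs_ufsize_t before adding the I/O length: with a signed offset near s_maxbytes the addition could overflow, which is undefined behaviour and can defeat the limit check. A self-contained illustration (loff_t_/xfs_ufsize_t_ are local stand-ins for the kernel typedefs):

	#include <stdint.h>
	#include <stdio.h>

	typedef int64_t  loff_t_;
	typedef uint64_t xfs_ufsize_t_;

	int main(void)
	{
		loff_t_ maxbytes = INT64_MAX;
		loff_t_ offset = INT64_MAX - 10;
		uint64_t count = 100;

		/* Signed "offset + count" would overflow here; widening the
		 * offset to unsigned first keeps the comparison defined. */
		if ((xfs_ufsize_t_)offset + count > (xfs_ufsize_t_)maxbytes)
			count = maxbytes - offset;

		printf("trimmed count=%llu\n", (unsigned long long)count);
		return 0;
	}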
+diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c
+index dd136f7275e4..e5fb008d75e8 100644
+--- a/fs/xfs/xfs_bmap_item.c
++++ b/fs/xfs/xfs_bmap_item.c
+@@ -389,7 +389,8 @@ xfs_bud_init(
+ int
+ xfs_bui_recover(
+ struct xfs_mount *mp,
+- struct xfs_bui_log_item *buip)
++ struct xfs_bui_log_item *buip,
++ struct xfs_defer_ops *dfops)
+ {
+ int error = 0;
+ unsigned int bui_type;
+@@ -404,9 +405,7 @@ xfs_bui_recover(
+ xfs_exntst_t state;
+ struct xfs_trans *tp;
+ struct xfs_inode *ip = NULL;
+- struct xfs_defer_ops dfops;
+ struct xfs_bmbt_irec irec;
+- xfs_fsblock_t firstfsb;
+
+ ASSERT(!test_bit(XFS_BUI_RECOVERED, &buip->bui_flags));
+
+@@ -464,7 +463,6 @@ xfs_bui_recover(
+
+ if (VFS_I(ip)->i_nlink == 0)
+ xfs_iflags_set(ip, XFS_IRECOVERY);
+- xfs_defer_init(&dfops, &firstfsb);
+
+ /* Process deferred bmap item. */
+ state = (bmap->me_flags & XFS_BMAP_EXTENT_UNWRITTEN) ?
+@@ -479,16 +477,16 @@ xfs_bui_recover(
+ break;
+ default:
+ error = -EFSCORRUPTED;
+- goto err_dfops;
++ goto err_inode;
+ }
+ xfs_trans_ijoin(tp, ip, 0);
+
+ count = bmap->me_len;
+- error = xfs_trans_log_finish_bmap_update(tp, budp, &dfops, type,
++ error = xfs_trans_log_finish_bmap_update(tp, budp, dfops, type,
+ ip, whichfork, bmap->me_startoff,
+ bmap->me_startblock, &count, state);
+ if (error)
+- goto err_dfops;
++ goto err_inode;
+
+ if (count > 0) {
+ ASSERT(type == XFS_BMAP_UNMAP);
+@@ -496,16 +494,11 @@ xfs_bui_recover(
+ irec.br_blockcount = count;
+ irec.br_startoff = bmap->me_startoff;
+ irec.br_state = state;
+- error = xfs_bmap_unmap_extent(tp->t_mountp, &dfops, ip, &irec);
++ error = xfs_bmap_unmap_extent(tp->t_mountp, dfops, ip, &irec);
+ if (error)
+- goto err_dfops;
++ goto err_inode;
+ }
+
+- /* Finish transaction, free inodes. */
+- error = xfs_defer_finish(&tp, &dfops);
+- if (error)
+- goto err_dfops;
+-
+ set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
+ error = xfs_trans_commit(tp);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+@@ -513,8 +506,6 @@ xfs_bui_recover(
+
+ return error;
+
+-err_dfops:
+- xfs_defer_cancel(&dfops);
+ err_inode:
+ xfs_trans_cancel(tp);
+ if (ip) {
+diff --git a/fs/xfs/xfs_bmap_item.h b/fs/xfs/xfs_bmap_item.h
+index c867daae4a3c..24b354a2c836 100644
+--- a/fs/xfs/xfs_bmap_item.h
++++ b/fs/xfs/xfs_bmap_item.h
+@@ -93,6 +93,7 @@ struct xfs_bud_log_item *xfs_bud_init(struct xfs_mount *,
+ struct xfs_bui_log_item *);
+ void xfs_bui_item_free(struct xfs_bui_log_item *);
+ void xfs_bui_release(struct xfs_bui_log_item *);
+-int xfs_bui_recover(struct xfs_mount *mp, struct xfs_bui_log_item *buip);
++int xfs_bui_recover(struct xfs_mount *mp, struct xfs_bui_log_item *buip,
++ struct xfs_defer_ops *dfops);
+
+ #endif /* __XFS_BMAP_ITEM_H__ */
+diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
+index 2f97c12ca75e..16f93d7356b7 100644
+--- a/fs/xfs/xfs_buf.c
++++ b/fs/xfs/xfs_buf.c
+@@ -1813,22 +1813,27 @@ xfs_alloc_buftarg(
+ btp->bt_daxdev = dax_dev;
+
+ if (xfs_setsize_buftarg_early(btp, bdev))
+- goto error;
++ goto error_free;
+
+ if (list_lru_init(&btp->bt_lru))
+- goto error;
++ goto error_free;
+
+ if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
+- goto error;
++ goto error_lru;
+
+ btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
+ btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
+ btp->bt_shrinker.seeks = DEFAULT_SEEKS;
+ btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
+- register_shrinker(&btp->bt_shrinker);
++ if (register_shrinker(&btp->bt_shrinker))
++ goto error_pcpu;
+ return btp;
+
+-error:
++error_pcpu:
++ percpu_counter_destroy(&btp->bt_io_count);
++error_lru:
++ list_lru_destroy(&btp->bt_lru);
++error_free:
+ kmem_free(btp);
+ return NULL;
+ }
+diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
+index cd82429d8df7..5a86495127fd 100644
+--- a/fs/xfs/xfs_dquot.c
++++ b/fs/xfs/xfs_dquot.c
+@@ -987,14 +987,22 @@ xfs_qm_dqflush_done(
+ * holding the lock before removing the dquot from the AIL.
+ */
+ if ((lip->li_flags & XFS_LI_IN_AIL) &&
+- lip->li_lsn == qip->qli_flush_lsn) {
++ ((lip->li_lsn == qip->qli_flush_lsn) ||
++ (lip->li_flags & XFS_LI_FAILED))) {
+
+ /* xfs_trans_ail_delete() drops the AIL lock. */
+ spin_lock(&ailp->xa_lock);
+- if (lip->li_lsn == qip->qli_flush_lsn)
++ if (lip->li_lsn == qip->qli_flush_lsn) {
+ xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
+- else
++ } else {
++ /*
++ * Clear the failed state since we are about to drop the
++ * flush lock
++ */
++ if (lip->li_flags & XFS_LI_FAILED)
++ xfs_clear_li_failed(lip);
+ spin_unlock(&ailp->xa_lock);
++ }
+ }
+
+ /*
+diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
+index 2c7a1629e064..664dea105e76 100644
+--- a/fs/xfs/xfs_dquot_item.c
++++ b/fs/xfs/xfs_dquot_item.c
+@@ -137,6 +137,26 @@ xfs_qm_dqunpin_wait(
+ wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0));
+ }
+
++/*
++ * Callback used to mark a buffer with XFS_LI_FAILED when items in the buffer
++ * have been failed during writeback.
++ *
++ * This informs the AIL that the dquot is already flush locked on the next push,
++ * and acquires a hold on the buffer to ensure that it isn't reclaimed before
++ * dirty data makes it to disk.
++ */
++STATIC void
++xfs_dquot_item_error(
++ struct xfs_log_item *lip,
++ struct xfs_buf *bp)
++{
++ struct xfs_dquot *dqp;
++
++ dqp = DQUOT_ITEM(lip)->qli_dquot;
++ ASSERT(!completion_done(&dqp->q_flush));
++ xfs_set_li_failed(lip, bp);
++}
++
+ STATIC uint
+ xfs_qm_dquot_logitem_push(
+ struct xfs_log_item *lip,
+@@ -144,13 +164,28 @@ xfs_qm_dquot_logitem_push(
+ __acquires(&lip->li_ailp->xa_lock)
+ {
+ struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot;
+- struct xfs_buf *bp = NULL;
++ struct xfs_buf *bp = lip->li_buf;
+ uint rval = XFS_ITEM_SUCCESS;
+ int error;
+
+ if (atomic_read(&dqp->q_pincount) > 0)
+ return XFS_ITEM_PINNED;
+
++ /*
++ * The buffer containing this item failed to be written back
++ * previously. Resubmit the buffer for IO
++ */
++ if (lip->li_flags & XFS_LI_FAILED) {
++ if (!xfs_buf_trylock(bp))
++ return XFS_ITEM_LOCKED;
++
++ if (!xfs_buf_resubmit_failed_buffers(bp, lip, buffer_list))
++ rval = XFS_ITEM_FLUSHING;
++
++ xfs_buf_unlock(bp);
++ return rval;
++ }
++
+ if (!xfs_dqlock_nowait(dqp))
+ return XFS_ITEM_LOCKED;
+
+@@ -242,7 +277,8 @@ static const struct xfs_item_ops xfs_dquot_item_ops = {
+ .iop_unlock = xfs_qm_dquot_logitem_unlock,
+ .iop_committed = xfs_qm_dquot_logitem_committed,
+ .iop_push = xfs_qm_dquot_logitem_push,
+- .iop_committing = xfs_qm_dquot_logitem_committing
++ .iop_committing = xfs_qm_dquot_logitem_committing,
++ .iop_error = xfs_dquot_item_error
+ };
+
+ /*
+diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
+index 63350906961a..cb4833d06467 100644
+--- a/fs/xfs/xfs_inode.c
++++ b/fs/xfs/xfs_inode.c
+@@ -2421,6 +2421,24 @@ xfs_ifree_cluster(
+ return 0;
+ }
+
++/*
++ * Free any local-format buffers sitting around before we reset to
++ * extents format.
++ */
++static inline void
++xfs_ifree_local_data(
++ struct xfs_inode *ip,
++ int whichfork)
++{
++ struct xfs_ifork *ifp;
++
++ if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
++ return;
++
++ ifp = XFS_IFORK_PTR(ip, whichfork);
++ xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
++}
++
+ /*
+ * This is called to return an inode to the inode free list.
+ * The inode should already be truncated to 0 length and have
+@@ -2458,6 +2476,9 @@ xfs_ifree(
+ if (error)
+ return error;
+
++ xfs_ifree_local_data(ip, XFS_DATA_FORK);
++ xfs_ifree_local_data(ip, XFS_ATTR_FORK);
++
+ VFS_I(ip)->i_mode = 0; /* mark incore inode as free */
+ ip->i_d.di_flags = 0;
+ ip->i_d.di_dmevmask = 0;
+diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
+index d6e049fdd977..eaf29646c28f 100644
+--- a/fs/xfs/xfs_log_recover.c
++++ b/fs/xfs/xfs_log_recover.c
+@@ -24,6 +24,7 @@
+ #include "xfs_bit.h"
+ #include "xfs_sb.h"
+ #include "xfs_mount.h"
++#include "xfs_defer.h"
+ #include "xfs_da_format.h"
+ #include "xfs_da_btree.h"
+ #include "xfs_inode.h"
+@@ -4714,7 +4715,8 @@ STATIC int
+ xlog_recover_process_cui(
+ struct xfs_mount *mp,
+ struct xfs_ail *ailp,
+- struct xfs_log_item *lip)
++ struct xfs_log_item *lip,
++ struct xfs_defer_ops *dfops)
+ {
+ struct xfs_cui_log_item *cuip;
+ int error;
+@@ -4727,7 +4729,7 @@ xlog_recover_process_cui(
+ return 0;
+
+ spin_unlock(&ailp->xa_lock);
+- error = xfs_cui_recover(mp, cuip);
++ error = xfs_cui_recover(mp, cuip, dfops);
+ spin_lock(&ailp->xa_lock);
+
+ return error;
+@@ -4754,7 +4756,8 @@ STATIC int
+ xlog_recover_process_bui(
+ struct xfs_mount *mp,
+ struct xfs_ail *ailp,
+- struct xfs_log_item *lip)
++ struct xfs_log_item *lip,
++ struct xfs_defer_ops *dfops)
+ {
+ struct xfs_bui_log_item *buip;
+ int error;
+@@ -4767,7 +4770,7 @@ xlog_recover_process_bui(
+ return 0;
+
+ spin_unlock(&ailp->xa_lock);
+- error = xfs_bui_recover(mp, buip);
++ error = xfs_bui_recover(mp, buip, dfops);
+ spin_lock(&ailp->xa_lock);
+
+ return error;
+@@ -4803,6 +4806,46 @@ static inline bool xlog_item_is_intent(struct xfs_log_item *lip)
+ }
+ }
+
++/* Take all the collected deferred ops and finish them in order. */
++static int
++xlog_finish_defer_ops(
++ struct xfs_mount *mp,
++ struct xfs_defer_ops *dfops)
++{
++ struct xfs_trans *tp;
++ int64_t freeblks;
++ uint resblks;
++ int error;
++
++ /*
++ * We're finishing the defer_ops that accumulated as a result of
++ * recovering unfinished intent items during log recovery. We
++ * reserve an itruncate transaction because it is the largest
++ * permanent transaction type. Since we're the only user of the fs
++ * right now, take 93% (15/16) of the available free blocks. Use
++ * weird math to avoid a 64-bit division.
++ */
++ freeblks = percpu_counter_sum(&mp->m_fdblocks);
++ if (freeblks <= 0)
++ return -ENOSPC;
++ resblks = min_t(int64_t, UINT_MAX, freeblks);
++ resblks = (resblks * 15) >> 4;
++ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, resblks,
++ 0, XFS_TRANS_RESERVE, &tp);
++ if (error)
++ return error;
++
++ error = xfs_defer_finish(&tp, dfops);
++ if (error)
++ goto out_cancel;
++
++ return xfs_trans_commit(tp);
++
++out_cancel:
++ xfs_trans_cancel(tp);
++ return error;
++}
++
+ /*
+ * When this is called, all of the log intent items which did not have
+ * corresponding log done items should be in the AIL. What we do now
+@@ -4823,10 +4866,12 @@ STATIC int
+ xlog_recover_process_intents(
+ struct xlog *log)
+ {
+- struct xfs_log_item *lip;
+- int error = 0;
++ struct xfs_defer_ops dfops;
+ struct xfs_ail_cursor cur;
++ struct xfs_log_item *lip;
+ struct xfs_ail *ailp;
++ xfs_fsblock_t firstfsb;
++ int error = 0;
+ #if defined(DEBUG) || defined(XFS_WARN)
+ xfs_lsn_t last_lsn;
+ #endif
+@@ -4837,6 +4882,7 @@ xlog_recover_process_intents(
+ #if defined(DEBUG) || defined(XFS_WARN)
+ last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
+ #endif
++ xfs_defer_init(&dfops, &firstfsb);
+ while (lip != NULL) {
+ /*
+ * We're done when we see something other than an intent.
+@@ -4857,6 +4903,12 @@ xlog_recover_process_intents(
+ */
+ ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0);
+
++ /*
++ * NOTE: If your intent processing routine can create more
++ * deferred ops, you /must/ attach them to the dfops in this
++ * routine or else those subsequent intents will get
++ * replayed in the wrong order!
++ */
+ switch (lip->li_type) {
+ case XFS_LI_EFI:
+ error = xlog_recover_process_efi(log->l_mp, ailp, lip);
+@@ -4865,10 +4917,12 @@ xlog_recover_process_intents(
+ error = xlog_recover_process_rui(log->l_mp, ailp, lip);
+ break;
+ case XFS_LI_CUI:
+- error = xlog_recover_process_cui(log->l_mp, ailp, lip);
++ error = xlog_recover_process_cui(log->l_mp, ailp, lip,
++ &dfops);
+ break;
+ case XFS_LI_BUI:
+- error = xlog_recover_process_bui(log->l_mp, ailp, lip);
++ error = xlog_recover_process_bui(log->l_mp, ailp, lip,
++ &dfops);
+ break;
+ }
+ if (error)
+@@ -4878,6 +4932,11 @@ xlog_recover_process_intents(
+ out:
+ xfs_trans_ail_cursor_done(&cur);
+ spin_unlock(&ailp->xa_lock);
++ if (error)
++ xfs_defer_cancel(&dfops);
++ else
++ error = xlog_finish_defer_ops(log->l_mp, &dfops);
++
+ return error;
+ }
+
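The 15/16 reservation in xlog_finish_defer_ops() uses multiply-and-shift rather than a division: (x * 15) >> 4 equals x * 15 / 16, and the shift never emits a 64-bit divide helper on 32-bit builds — the "weird math" the comment mentions. A quick check of the equivalence with a hypothetical free-block count:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t freeblks = 1048576;	/* hypothetical count */
		uint64_t resblks = (freeblks * 15) >> 4;	/* 93.75% */

		printf("%llu of %llu blocks reserved (equal=%d)\n",
		       (unsigned long long)resblks,
		       (unsigned long long)freeblks,
		       resblks == freeblks * 15 / 16);
		return 0;
	}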
+diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c
+index 8f2e2fac4255..3a55d6fc271b 100644
+--- a/fs/xfs/xfs_refcount_item.c
++++ b/fs/xfs/xfs_refcount_item.c
+@@ -393,7 +393,8 @@ xfs_cud_init(
+ int
+ xfs_cui_recover(
+ struct xfs_mount *mp,
+- struct xfs_cui_log_item *cuip)
++ struct xfs_cui_log_item *cuip,
++ struct xfs_defer_ops *dfops)
+ {
+ int i;
+ int error = 0;
+@@ -405,11 +406,9 @@ xfs_cui_recover(
+ struct xfs_trans *tp;
+ struct xfs_btree_cur *rcur = NULL;
+ enum xfs_refcount_intent_type type;
+- xfs_fsblock_t firstfsb;
+ xfs_fsblock_t new_fsb;
+ xfs_extlen_t new_len;
+ struct xfs_bmbt_irec irec;
+- struct xfs_defer_ops dfops;
+ bool requeue_only = false;
+
+ ASSERT(!test_bit(XFS_CUI_RECOVERED, &cuip->cui_flags));
+@@ -465,7 +464,6 @@ xfs_cui_recover(
+ return error;
+ cudp = xfs_trans_get_cud(tp, cuip);
+
+- xfs_defer_init(&dfops, &firstfsb);
+ for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
+ refc = &cuip->cui_format.cui_extents[i];
+ refc_type = refc->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK;
+@@ -485,7 +483,7 @@ xfs_cui_recover(
+ new_len = refc->pe_len;
+ } else
+ error = xfs_trans_log_finish_refcount_update(tp, cudp,
+- &dfops, type, refc->pe_startblock, refc->pe_len,
++ dfops, type, refc->pe_startblock, refc->pe_len,
+ &new_fsb, &new_len, &rcur);
+ if (error)
+ goto abort_error;
+@@ -497,21 +495,21 @@ xfs_cui_recover(
+ switch (type) {
+ case XFS_REFCOUNT_INCREASE:
+ error = xfs_refcount_increase_extent(
+- tp->t_mountp, &dfops, &irec);
++ tp->t_mountp, dfops, &irec);
+ break;
+ case XFS_REFCOUNT_DECREASE:
+ error = xfs_refcount_decrease_extent(
+- tp->t_mountp, &dfops, &irec);
++ tp->t_mountp, dfops, &irec);
+ break;
+ case XFS_REFCOUNT_ALLOC_COW:
+ error = xfs_refcount_alloc_cow_extent(
+- tp->t_mountp, &dfops,
++ tp->t_mountp, dfops,
+ irec.br_startblock,
+ irec.br_blockcount);
+ break;
+ case XFS_REFCOUNT_FREE_COW:
+ error = xfs_refcount_free_cow_extent(
+- tp->t_mountp, &dfops,
++ tp->t_mountp, dfops,
+ irec.br_startblock,
+ irec.br_blockcount);
+ break;
+@@ -525,17 +523,12 @@ xfs_cui_recover(
+ }
+
+ xfs_refcount_finish_one_cleanup(tp, rcur, error);
+- error = xfs_defer_finish(&tp, &dfops);
+- if (error)
+- goto abort_defer;
+ set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags);
+ error = xfs_trans_commit(tp);
+ return error;
+
+ abort_error:
+ xfs_refcount_finish_one_cleanup(tp, rcur, error);
+-abort_defer:
+- xfs_defer_cancel(&dfops);
+ xfs_trans_cancel(tp);
+ return error;
+ }
+diff --git a/fs/xfs/xfs_refcount_item.h b/fs/xfs/xfs_refcount_item.h
+index 5b74dddfa64b..0e5327349a13 100644
+--- a/fs/xfs/xfs_refcount_item.h
++++ b/fs/xfs/xfs_refcount_item.h
+@@ -96,6 +96,7 @@ struct xfs_cud_log_item *xfs_cud_init(struct xfs_mount *,
+ struct xfs_cui_log_item *);
+ void xfs_cui_item_free(struct xfs_cui_log_item *);
+ void xfs_cui_release(struct xfs_cui_log_item *);
+-int xfs_cui_recover(struct xfs_mount *mp, struct xfs_cui_log_item *cuip);
++int xfs_cui_recover(struct xfs_mount *mp, struct xfs_cui_log_item *cuip,
++ struct xfs_defer_ops *dfops);
+
+ #endif /* __XFS_REFCOUNT_ITEM_H__ */
+diff --git a/include/crypto/gcm.h b/include/crypto/gcm.h
+new file mode 100644
+index 000000000000..c50e057ea17e
+--- /dev/null
++++ b/include/crypto/gcm.h
+@@ -0,0 +1,8 @@
++#ifndef _CRYPTO_GCM_H
++#define _CRYPTO_GCM_H
++
++#define GCM_AES_IV_SIZE 12
++#define GCM_RFC4106_IV_SIZE 8
++#define GCM_RFC4543_IV_SIZE 8
++
++#endif
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index 6882538eda32..5a8019befafd 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -714,6 +714,9 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
+ unsigned long len);
+ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
+
++void kvm_sigset_activate(struct kvm_vcpu *vcpu);
++void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
++
+ void kvm_vcpu_block(struct kvm_vcpu *vcpu);
+ void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
+ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
+diff --git a/include/linux/tty.h b/include/linux/tty.h
+index 7ac8ba208b1f..0a6c71e0ad01 100644
+--- a/include/linux/tty.h
++++ b/include/linux/tty.h
+@@ -405,6 +405,8 @@ extern const char *tty_name(const struct tty_struct *tty);
+ extern struct tty_struct *tty_kopen(dev_t device);
+ extern void tty_kclose(struct tty_struct *tty);
+ extern int tty_dev_name_to_number(const char *name, dev_t *number);
++extern int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout);
++extern void tty_ldisc_unlock(struct tty_struct *tty);
+ #else
+ static inline void tty_kref_put(struct tty_struct *tty)
+ { }
+diff --git a/include/net/mac80211.h b/include/net/mac80211.h
+index 885690fa39c8..4f1d2dec43ce 100644
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -4470,18 +4470,24 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
+ * ieee80211_nullfunc_get - retrieve a nullfunc template
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
++ * @qos_ok: QoS NDP is acceptable to the caller, this should be set
++ * if at all possible
+ *
+ * Creates a Nullfunc template which can, for example, be uploaded to
+ * hardware. The template must be updated after association so that correct
+ * BSSID and address is used.
+ *
++ * If @qos_ok is set and the association is to an AP with QoS/WMM, the
++ * returned packet will be QoS NDP.
++ *
+ * Note: Caller (or hardware) is responsible for setting the
+ * &IEEE80211_FCTL_PM bit as well as Duration and Sequence Control fields.
+ *
+ * Return: The nullfunc template. %NULL on error.
+ */
+ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
+- struct ieee80211_vif *vif);
++ struct ieee80211_vif *vif,
++ bool qos_ok);
+
+ /**
+ * ieee80211_probereq_get - retrieve a Probe Request template
+diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
+index ebe96796027a..a58caf5807ff 100644
+--- a/include/trace/events/rxrpc.h
++++ b/include/trace/events/rxrpc.h
+@@ -49,6 +49,7 @@ enum rxrpc_conn_trace {
+ rxrpc_conn_put_client,
+ rxrpc_conn_put_service,
+ rxrpc_conn_queued,
++ rxrpc_conn_reap_service,
+ rxrpc_conn_seen,
+ };
+
+@@ -206,6 +207,7 @@ enum rxrpc_congest_change {
+ EM(rxrpc_conn_put_client, "PTc") \
+ EM(rxrpc_conn_put_service, "PTs") \
+ EM(rxrpc_conn_queued, "QUE") \
++ EM(rxrpc_conn_reap_service, "RPs") \
+ E_(rxrpc_conn_seen, "SEE")
+
+ #define rxrpc_client_traces \
+diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
+index 26283fefdf5f..f7015aa12347 100644
+--- a/include/uapi/linux/kfd_ioctl.h
++++ b/include/uapi/linux/kfd_ioctl.h
+@@ -233,29 +233,29 @@ struct kfd_ioctl_wait_events_args {
+ };
+
+ struct kfd_ioctl_set_scratch_backing_va_args {
+- uint64_t va_addr; /* to KFD */
+- uint32_t gpu_id; /* to KFD */
+- uint32_t pad;
++ __u64 va_addr; /* to KFD */
++ __u32 gpu_id; /* to KFD */
++ __u32 pad;
+ };
+
+ struct kfd_ioctl_get_tile_config_args {
+ /* to KFD: pointer to tile array */
+- uint64_t tile_config_ptr;
++ __u64 tile_config_ptr;
+ /* to KFD: pointer to macro tile array */
+- uint64_t macro_tile_config_ptr;
++ __u64 macro_tile_config_ptr;
+ /* to KFD: array size allocated by user mode
+ * from KFD: array size filled by kernel
+ */
+- uint32_t num_tile_configs;
++ __u32 num_tile_configs;
+ /* to KFD: array size allocated by user mode
+ * from KFD: array size filled by kernel
+ */
+- uint32_t num_macro_tile_configs;
++ __u32 num_macro_tile_configs;
+
+- uint32_t gpu_id; /* to KFD */
+- uint32_t gb_addr_config; /* from KFD */
+- uint32_t num_banks; /* from KFD */
+- uint32_t num_ranks; /* from KFD */
++ __u32 gpu_id; /* to KFD */
++ __u32 gb_addr_config; /* from KFD */
++ __u32 num_banks; /* from KFD */
++ __u32 num_ranks; /* from KFD */
+ /* struct size can be extended later if needed
+ * without breaking ABI compatibility
+ */
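The kfd_ioctl.h hunk swaps uint64_t/uint32_t for __u64/__u32 because headers under include/uapi are exported to userspace and may be included without <stdint.h>; the __uXX spellings come from <linux/types.h> alone. A minimal sketch of the convention (struct demo_ioctl_args is hypothetical):

	#include <linux/types.h>
	#include <stdio.h>

	/* UAPI-style struct: fixed-width fields via the kernel's own
	 * spellings, padded so the size stays 8-byte aligned. */
	struct demo_ioctl_args {
		__u64 va_addr;	/* to kernel */
		__u32 gpu_id;	/* to kernel */
		__u32 pad;
	};

	int main(void)
	{
		printf("sizeof(struct demo_ioctl_args) = %zu\n",
		       sizeof(struct demo_ioctl_args));
		return 0;
	}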
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 24ebad5567b4..8c20af8738ac 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -6756,6 +6756,7 @@ static void perf_fill_ns_link_info(struct perf_ns_link_info *ns_link_info,
+ ns_inode = ns_path.dentry->d_inode;
+ ns_link_info->dev = new_encode_dev(ns_inode->i_sb->s_dev);
+ ns_link_info->ino = ns_inode->i_ino;
++ path_put(&ns_path);
+ }
+ }
+
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 52b3f4703158..046cd780d057 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2311,9 +2311,6 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
+
+ oldowner = pi_state->owner;
+- /* Owner died? */
+- if (!pi_state->owner)
+- newtid |= FUTEX_OWNER_DIED;
+
+ /*
+ * We are here because either:
+@@ -2374,6 +2371,9 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+ }
+
+ newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
++ /* Owner died? */
++ if (!pi_state->owner)
++ newtid |= FUTEX_OWNER_DIED;
+
+ if (get_futex_value_locked(&uval, uaddr))
+ goto handle_fault;
+diff --git a/lib/test_firmware.c b/lib/test_firmware.c
+index 64a4c76cba2b..e7008688769b 100644
+--- a/lib/test_firmware.c
++++ b/lib/test_firmware.c
+@@ -371,6 +371,7 @@ static ssize_t config_num_requests_store(struct device *dev,
+ if (test_fw_config->reqs) {
+ pr_err("Must call release_all_firmware prior to changing config\n");
+ rc = -EINVAL;
++ mutex_unlock(&test_fw_mutex);
+ goto out;
+ }
+ mutex_unlock(&test_fw_mutex);
+diff --git a/mm/kmemleak.c b/mm/kmemleak.c
+index 7780cd83a495..a1ba553816eb 100644
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -1532,6 +1532,8 @@ static void kmemleak_scan(void)
+ if (page_count(page) == 0)
+ continue;
+ scan_block(page, page + 1, NULL);
++ if (!(pfn % (MAX_SCAN_SIZE / sizeof(*page))))
++ cond_resched();
+ }
+ }
+ put_online_mems();
+diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
+index d8bbd0d2225a..d6d3f316de4c 100644
+--- a/net/mac80211/mesh_hwmp.c
++++ b/net/mac80211/mesh_hwmp.c
+@@ -797,7 +797,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
+ struct mesh_path *mpath;
+ u8 ttl, flags, hopcount;
+ const u8 *orig_addr;
+- u32 orig_sn, metric, metric_txsta, interval;
++ u32 orig_sn, new_metric, orig_metric, last_hop_metric, interval;
+ bool root_is_gate;
+
+ ttl = rann->rann_ttl;
+@@ -808,7 +808,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
+ interval = le32_to_cpu(rann->rann_interval);
+ hopcount = rann->rann_hopcount;
+ hopcount++;
+- metric = le32_to_cpu(rann->rann_metric);
++ orig_metric = le32_to_cpu(rann->rann_metric);
+
+ /* Ignore our own RANNs */
+ if (ether_addr_equal(orig_addr, sdata->vif.addr))
+@@ -825,7 +825,10 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
+ return;
+ }
+
+- metric_txsta = airtime_link_metric_get(local, sta);
++ last_hop_metric = airtime_link_metric_get(local, sta);
++ new_metric = orig_metric + last_hop_metric;
++ if (new_metric < orig_metric)
++ new_metric = MAX_METRIC;
+
+ mpath = mesh_path_lookup(sdata, orig_addr);
+ if (!mpath) {
+@@ -838,7 +841,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
+ }
+
+ if (!(SN_LT(mpath->sn, orig_sn)) &&
+- !(mpath->sn == orig_sn && metric < mpath->rann_metric)) {
++ !(mpath->sn == orig_sn && new_metric < mpath->rann_metric)) {
+ rcu_read_unlock();
+ return;
+ }
+@@ -856,7 +859,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
+ }
+
+ mpath->sn = orig_sn;
+- mpath->rann_metric = metric + metric_txsta;
++ mpath->rann_metric = new_metric;
+ mpath->is_root = true;
+ /* Recording RANNs sender address to send individually
+ * addressed PREQs destined for root mesh STA */
+@@ -876,7 +879,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
+ mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
+ orig_sn, 0, NULL, 0, broadcast_addr,
+ hopcount, ttl, interval,
+- metric + metric_txsta, 0, sdata);
++ new_metric, 0, sdata);
+ }
+
+ rcu_read_unlock();
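hwmp_rann_frame_process() now saturates the accumulated path metric: orig_metric + last_hop_metric can wrap the u32, and a wrapped (tiny) metric would make a distant root look like the best path. A standalone version of that clamped addition, assuming MAX_METRIC acts as the cap as in mesh_hwmp.c:

	#include <stdint.h>
	#include <stdio.h>

	#define MAX_METRIC 0xffffffffu	/* assumed saturation cap */

	/* If the sum wraps it comes out smaller than either operand, so
	 * clamp rather than propagate a misleadingly small metric. */
	static uint32_t metric_add(uint32_t orig, uint32_t last_hop)
	{
		uint32_t sum = orig + last_hop;

		return sum < orig ? MAX_METRIC : sum;
	}

	int main(void)
	{
		printf("%u %u\n", metric_add(10, 20),
		       metric_add(0xfffffff0u, 0x20));
		return 0;
	}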
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 3b8e2709d8de..9115cc52ce83 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -908,7 +908,7 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
+ struct ieee80211_hdr_3addr *nullfunc;
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+
+- skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif);
++ skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif, true);
+ if (!skb)
+ return;
+
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 94826680cf2b..73429841f115 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -4404,13 +4404,15 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
+ EXPORT_SYMBOL(ieee80211_pspoll_get);
+
+ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
+- struct ieee80211_vif *vif)
++ struct ieee80211_vif *vif,
++ bool qos_ok)
+ {
+ struct ieee80211_hdr_3addr *nullfunc;
+ struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_if_managed *ifmgd;
+ struct ieee80211_local *local;
+ struct sk_buff *skb;
++ bool qos = false;
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
+ return NULL;
+@@ -4419,7 +4421,17 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
+ ifmgd = &sdata->u.mgd;
+ local = sdata->local;
+
+- skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc));
++ if (qos_ok) {
++ struct sta_info *sta;
++
++ rcu_read_lock();
++ sta = sta_info_get(sdata, ifmgd->bssid);
++ qos = sta && sta->sta.wme;
++ rcu_read_unlock();
++ }
++
++ skb = dev_alloc_skb(local->hw.extra_tx_headroom +
++ sizeof(*nullfunc) + 2);
+ if (!skb)
+ return NULL;
+
+@@ -4429,6 +4441,19 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
+ nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_NULLFUNC |
+ IEEE80211_FCTL_TODS);
++ if (qos) {
++ __le16 qos = cpu_to_le16(7);
++
++ BUILD_BUG_ON((IEEE80211_STYPE_QOS_NULLFUNC |
++ IEEE80211_STYPE_NULLFUNC) !=
++ IEEE80211_STYPE_QOS_NULLFUNC);
++ nullfunc->frame_control |=
++ cpu_to_le16(IEEE80211_STYPE_QOS_NULLFUNC);
++ skb->priority = 7;
++ skb_set_queue_mapping(skb, IEEE80211_AC_VO);
++ skb_put_data(skb, &qos, sizeof(qos));
++ }
++
+ memcpy(nullfunc->addr1, ifmgd->bssid, ETH_ALEN);
+ memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
+ memcpy(nullfunc->addr3, ifmgd->bssid, ETH_ALEN);
+diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
+index e8eb427ce6d1..0d9f6afa266c 100644
+--- a/net/openvswitch/flow_netlink.c
++++ b/net/openvswitch/flow_netlink.c
+@@ -1903,14 +1903,11 @@ int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb)
+
+ #define MAX_ACTIONS_BUFSIZE (32 * 1024)
+
+-static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log)
++static struct sw_flow_actions *nla_alloc_flow_actions(int size)
+ {
+ struct sw_flow_actions *sfa;
+
+- if (size > MAX_ACTIONS_BUFSIZE) {
+- OVS_NLERR(log, "Flow action size %u bytes exceeds max", size);
+- return ERR_PTR(-EINVAL);
+- }
++ WARN_ON_ONCE(size > MAX_ACTIONS_BUFSIZE);
+
+ sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
+ if (!sfa)
+@@ -1983,12 +1980,15 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
+ new_acts_size = ksize(*sfa) * 2;
+
+ if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
+- if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
++ if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
++ OVS_NLERR(log, "Flow action size exceeds max %u",
++ MAX_ACTIONS_BUFSIZE);
+ return ERR_PTR(-EMSGSIZE);
++ }
+ new_acts_size = MAX_ACTIONS_BUFSIZE;
+ }
+
+- acts = nla_alloc_flow_actions(new_acts_size, log);
++ acts = nla_alloc_flow_actions(new_acts_size);
+ if (IS_ERR(acts))
+ return (void *)acts;
+
+@@ -2660,7 +2660,7 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
+ {
+ int err;
+
+- *sfa = nla_alloc_flow_actions(nla_len(attr), log);
++ *sfa = nla_alloc_flow_actions(min(nla_len(attr), MAX_ACTIONS_BUFSIZE));
+ if (IS_ERR(*sfa))
+ return PTR_ERR(*sfa);
+
+diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
+index 4b0a8288c98a..7c1cb08874d5 100644
+--- a/net/rxrpc/af_rxrpc.c
++++ b/net/rxrpc/af_rxrpc.c
+@@ -823,6 +823,19 @@ static int rxrpc_release_sock(struct sock *sk)
+ sock_orphan(sk);
+ sk->sk_shutdown = SHUTDOWN_MASK;
+
++ /* We want to kill off all connections from a service socket
++ * as fast as possible because we can't share these; client
++ * sockets, on the other hand, can share an endpoint.
++ */
++ switch (sk->sk_state) {
++ case RXRPC_SERVER_BOUND:
++ case RXRPC_SERVER_BOUND2:
++ case RXRPC_SERVER_LISTENING:
++ case RXRPC_SERVER_LISTEN_DISABLED:
++ rx->local->service_closed = true;
++ break;
++ }
++
+ spin_lock_bh(&sk->sk_receive_queue.lock);
+ sk->sk_state = RXRPC_CLOSE;
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
+diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
+index ea5600b747cc..e6c2c4f56fb1 100644
+--- a/net/rxrpc/ar-internal.h
++++ b/net/rxrpc/ar-internal.h
+@@ -84,6 +84,7 @@ struct rxrpc_net {
+ unsigned int nr_client_conns;
+ unsigned int nr_active_client_conns;
+ bool kill_all_client_conns;
++ bool live;
+ spinlock_t client_conn_cache_lock; /* Lock for ->*_client_conns */
+ spinlock_t client_conn_discard_lock; /* Prevent multiple discarders */
+ struct list_head waiting_client_conns;
+@@ -265,6 +266,7 @@ struct rxrpc_local {
+ rwlock_t services_lock; /* lock for services list */
+ int debug_id; /* debug ID for printks */
+ bool dead;
++ bool service_closed; /* Service socket closed */
+ struct sockaddr_rxrpc srx; /* local address */
+ };
+
+@@ -671,7 +673,7 @@ extern unsigned int rxrpc_max_call_lifetime;
+ extern struct kmem_cache *rxrpc_call_jar;
+
+ struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
+-struct rxrpc_call *rxrpc_alloc_call(gfp_t);
++struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t);
+ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
+ struct rxrpc_conn_parameters *,
+ struct sockaddr_rxrpc *,
+@@ -824,6 +826,7 @@ void rxrpc_process_connection(struct work_struct *);
+ * conn_object.c
+ */
+ extern unsigned int rxrpc_connection_expiry;
++extern unsigned int rxrpc_closed_conn_expiry;
+
+ struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
+ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
+diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
+index cbd1701e813a..3028298ca561 100644
+--- a/net/rxrpc/call_accept.c
++++ b/net/rxrpc/call_accept.c
+@@ -94,7 +94,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
+ /* Now it gets complicated, because calls get registered with the
+ * socket here, particularly if a user ID is preassigned by the user.
+ */
+- call = rxrpc_alloc_call(gfp);
++ call = rxrpc_alloc_call(rx, gfp);
+ if (!call)
+ return -ENOMEM;
+ call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
+diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
+index fcdd6555a820..8a5a42e8ec23 100644
+--- a/net/rxrpc/call_object.c
++++ b/net/rxrpc/call_object.c
+@@ -55,6 +55,8 @@ static void rxrpc_call_timer_expired(unsigned long _call)
+ rxrpc_set_timer(call, rxrpc_timer_expired, ktime_get_real());
+ }
+
++static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;
++
+ /*
+ * find an extant server call
+ * - called in process context with IRQs enabled
+@@ -95,7 +97,7 @@ struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
+ /*
+ * allocate a new call
+ */
+-struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
++struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp)
+ {
+ struct rxrpc_call *call;
+
+@@ -114,6 +116,14 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
+ goto nomem_2;
+
+ mutex_init(&call->user_mutex);
++
++ /* Prevent lockdep from reporting a deadlock false positive between the
++ * afs filesystem and sys_sendmsg() via the mmap sem.
++ */
++ if (rx->sk.sk_kern_sock)
++ lockdep_set_class(&call->user_mutex,
++ &rxrpc_call_user_mutex_lock_class_key);
++
+ setup_timer(&call->timer, rxrpc_call_timer_expired,
+ (unsigned long)call);
+ INIT_WORK(&call->processor, &rxrpc_process_call);
+@@ -150,7 +160,8 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
+ /*
+ * Allocate a new client call.
+ */
+-static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx,
++static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
++ struct sockaddr_rxrpc *srx,
+ gfp_t gfp)
+ {
+ struct rxrpc_call *call;
+@@ -158,7 +169,7 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx,
+
+ _enter("");
+
+- call = rxrpc_alloc_call(gfp);
++ call = rxrpc_alloc_call(rx, gfp);
+ if (!call)
+ return ERR_PTR(-ENOMEM);
+ call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
+@@ -209,7 +220,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
+
+ _enter("%p,%lx", rx, user_call_ID);
+
+- call = rxrpc_alloc_client_call(srx, gfp);
++ call = rxrpc_alloc_client_call(rx, srx, gfp);
+ if (IS_ERR(call)) {
+ release_sock(&rx->sk);
+ _leave(" = %ld", PTR_ERR(call));
+diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
+index 5f9624bd311c..78a154173d90 100644
+--- a/net/rxrpc/conn_client.c
++++ b/net/rxrpc/conn_client.c
+@@ -1061,6 +1061,8 @@ void rxrpc_discard_expired_client_conns(struct work_struct *work)
+ expiry = rxrpc_conn_idle_client_expiry;
+ if (nr_conns > rxrpc_reap_client_connections)
+ expiry = rxrpc_conn_idle_client_fast_expiry;
++ if (conn->params.local->service_closed)
++ expiry = rxrpc_closed_conn_expiry * HZ;
+
+ conn_expires_at = conn->idle_timestamp + expiry;
+
+diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
+index fe575798592f..a48c817b792b 100644
+--- a/net/rxrpc/conn_object.c
++++ b/net/rxrpc/conn_object.c
+@@ -20,7 +20,8 @@
+ /*
+ * Time till a connection expires after last use (in seconds).
+ */
+-unsigned int rxrpc_connection_expiry = 10 * 60;
++unsigned int __read_mostly rxrpc_connection_expiry = 10 * 60;
++unsigned int __read_mostly rxrpc_closed_conn_expiry = 10;
+
+ static void rxrpc_destroy_connection(struct rcu_head *);
+
+@@ -312,7 +313,7 @@ void rxrpc_put_service_conn(struct rxrpc_connection *conn)
+ n = atomic_dec_return(&conn->usage);
+ trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here);
+ ASSERTCMP(n, >=, 0);
+- if (n == 0) {
++ if (n == 1) {
+ rxnet = conn->params.local->rxnet;
+ rxrpc_queue_delayed_work(&rxnet->service_conn_reaper, 0);
+ }
+@@ -353,15 +354,14 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
+ struct rxrpc_net *rxnet =
+ container_of(to_delayed_work(work),
+ struct rxrpc_net, service_conn_reaper);
+- unsigned long reap_older_than, earliest, idle_timestamp, now;
++ unsigned long expire_at, earliest, idle_timestamp, now;
+
+ LIST_HEAD(graveyard);
+
+ _enter("");
+
+ now = jiffies;
+- reap_older_than = now - rxrpc_connection_expiry * HZ;
+- earliest = ULONG_MAX;
++ earliest = now + MAX_JIFFY_OFFSET;
+
+ write_lock(&rxnet->conn_lock);
+ list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
+@@ -371,15 +371,21 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
+ if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
+ continue;
+
+- idle_timestamp = READ_ONCE(conn->idle_timestamp);
+- _debug("reap CONN %d { u=%d,t=%ld }",
+- conn->debug_id, atomic_read(&conn->usage),
+- (long)reap_older_than - (long)idle_timestamp);
+-
+- if (time_after(idle_timestamp, reap_older_than)) {
+- if (time_before(idle_timestamp, earliest))
+- earliest = idle_timestamp;
+- continue;
++ if (rxnet->live) {
++ idle_timestamp = READ_ONCE(conn->idle_timestamp);
++ expire_at = idle_timestamp + rxrpc_connection_expiry * HZ;
++ if (conn->params.local->service_closed)
++ expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ;
++
++ _debug("reap CONN %d { u=%d,t=%ld }",
++ conn->debug_id, atomic_read(&conn->usage),
++ (long)expire_at - (long)now);
++
++ if (time_before(now, expire_at)) {
++ if (time_before(expire_at, earliest))
++ earliest = expire_at;
++ continue;
++ }
+ }
+
+ /* The usage count sits at 1 whilst the object is unused on the
+@@ -387,6 +393,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
+ */
+ if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
+ continue;
++ trace_rxrpc_conn(conn, rxrpc_conn_reap_service, 0, 0);
+
+ if (rxrpc_conn_is_client(conn))
+ BUG();
+@@ -397,10 +404,10 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
+ }
+ write_unlock(&rxnet->conn_lock);
+
+- if (earliest != ULONG_MAX) {
+- _debug("reschedule reaper %ld", (long) earliest - now);
++ if (earliest != now + MAX_JIFFY_OFFSET) {
++ _debug("reschedule reaper %ld", (long)earliest - (long)now);
+ ASSERT(time_after(earliest, now));
+- rxrpc_queue_delayed_work(&rxnet->client_conn_reaper,
++ rxrpc_queue_delayed_work(&rxnet->service_conn_reaper,
+ earliest - now);
+ }
+
+@@ -429,7 +436,6 @@ void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
+
+ rxrpc_destroy_all_client_connections(rxnet);
+
+- rxrpc_connection_expiry = 0;
+ cancel_delayed_work(&rxnet->client_conn_reaper);
+ rxrpc_queue_delayed_work(&rxnet->client_conn_reaper, 0);
+ flush_workqueue(rxrpc_workqueue);
+diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
+index 7edceb8522f5..684c51d600c7 100644
+--- a/net/rxrpc/net_ns.c
++++ b/net/rxrpc/net_ns.c
+@@ -22,6 +22,7 @@ static __net_init int rxrpc_init_net(struct net *net)
+ struct rxrpc_net *rxnet = rxrpc_net(net);
+ int ret;
+
++ rxnet->live = true;
+ get_random_bytes(&rxnet->epoch, sizeof(rxnet->epoch));
+ rxnet->epoch |= RXRPC_RANDOM_EPOCH;
+
+@@ -60,6 +61,7 @@ static __net_init int rxrpc_init_net(struct net *net)
+ return 0;
+
+ err_proc:
++ rxnet->live = false;
+ return ret;
+ }
+
+@@ -70,6 +72,7 @@ static __net_exit void rxrpc_exit_net(struct net *net)
+ {
+ struct rxrpc_net *rxnet = rxrpc_net(net);
+
++ rxnet->live = false;
+ rxrpc_destroy_all_calls(rxnet);
+ rxrpc_destroy_all_connections(rxnet);
+ rxrpc_destroy_all_locals(rxnet);
+diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
+index 9ea6f972767e..d2f51d6a253c 100644
+--- a/net/rxrpc/sendmsg.c
++++ b/net/rxrpc/sendmsg.c
+@@ -563,8 +563,8 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
+ /* The socket is now unlocked. */
+ if (IS_ERR(call))
+ return PTR_ERR(call);
+- rxrpc_put_call(call, rxrpc_call_put);
+- return 0;
++ ret = 0;
++ goto out_put_unlock;
+ }
+
+ call = rxrpc_find_call_by_user_ID(rx, p.user_call_ID);
+@@ -633,6 +633,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
+ ret = rxrpc_send_data(rx, call, msg, len, NULL);
+ }
+
++out_put_unlock:
+ mutex_unlock(&call->user_mutex);
+ error_put:
+ rxrpc_put_call(call, rxrpc_call_put);
+diff --git a/net/sctp/stream.c b/net/sctp/stream.c
+index 724adf2786a2..9ea6057ed28b 100644
+--- a/net/sctp/stream.c
++++ b/net/sctp/stream.c
+@@ -224,6 +224,9 @@ int sctp_send_reset_assoc(struct sctp_association *asoc)
+ if (asoc->strreset_outstanding)
+ return -EINPROGRESS;
+
++ if (!sctp_outq_is_empty(&asoc->outqueue))
++ return -EAGAIN;
++
+ chunk = sctp_make_strreset_tsnreq(asoc);
+ if (!chunk)
+ return -ENOMEM;
+@@ -538,12 +541,18 @@ struct sctp_chunk *sctp_process_strreset_tsnreq(
+ i = asoc->strreset_inseq - request_seq - 1;
+ result = asoc->strreset_result[i];
+ if (result == SCTP_STRRESET_PERFORMED) {
+- next_tsn = asoc->next_tsn;
++ next_tsn = asoc->ctsn_ack_point + 1;
+ init_tsn =
+ sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1;
+ }
+ goto err;
+ }
++
++ if (!sctp_outq_is_empty(&asoc->outqueue)) {
++ result = SCTP_STRRESET_IN_PROGRESS;
++ goto err;
++ }
++
+ asoc->strreset_inseq++;
+
+ if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_ASSOC_REQ))
+@@ -554,9 +563,10 @@ struct sctp_chunk *sctp_process_strreset_tsnreq(
+ goto out;
+ }
+
+- /* G3: The same processing as though a SACK chunk with no gap report
+- * and a cumulative TSN ACK of the Sender's Next TSN minus 1 were
+- * received MUST be performed.
++ /* G4: The same processing as though a FWD-TSN chunk (as defined in
++ * [RFC3758]) with all streams affected and a new cumulative TSN
++ * ACK of the Receiver's Next TSN minus 1 were received MUST be
++ * performed.
+ */
+ max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);
+ sctp_ulpq_reasm_flushtsn(&asoc->ulpq, max_tsn_seen);
+@@ -571,10 +581,9 @@ struct sctp_chunk *sctp_process_strreset_tsnreq(
+ sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
+ init_tsn, GFP_ATOMIC);
+
+- /* G4: The same processing as though a FWD-TSN chunk (as defined in
+- * [RFC3758]) with all streams affected and a new cumulative TSN
+- * ACK of the Receiver's Next TSN minus 1 were received MUST be
+- * performed.
++ /* G3: The same processing as though a SACK chunk with no gap report
++ * and a cumulative TSN ACK of the Sender's Next TSN minus 1 were
++ * received MUST be performed.
+ */
+ sctp_outq_free(&asoc->outqueue);
+
+@@ -835,6 +844,7 @@ struct sctp_chunk *sctp_process_strreset_resp(
+ if (result == SCTP_STRRESET_PERFORMED) {
+ __u32 mtsn = sctp_tsnmap_get_max_tsn_seen(
+ &asoc->peer.tsn_map);
++ LIST_HEAD(temp);
+
+ sctp_ulpq_reasm_flushtsn(&asoc->ulpq, mtsn);
+ sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
+@@ -843,7 +853,13 @@ struct sctp_chunk *sctp_process_strreset_resp(
+ SCTP_TSN_MAP_INITIAL,
+ stsn, GFP_ATOMIC);
+
++ /* Clean up sacked and abandoned queues only. As the
++ * out_chunk_list may not be empty, splice it to temp,
++ * then get it back after sctp_outq_free is done.
++ */
++ list_splice_init(&asoc->outqueue.out_chunk_list, &temp);
+ sctp_outq_free(&asoc->outqueue);
++ list_splice_init(&temp, &asoc->outqueue.out_chunk_list);
+
+ asoc->next_tsn = rtsn;
+ asoc->ctsn_ack_point = asoc->next_tsn - 1;
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 4dad5da388d6..8cb40f8ffa5b 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -2437,6 +2437,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
+ case -ECONNREFUSED:
+ case -ECONNRESET:
+ case -ENETUNREACH:
++ case -EHOSTUNREACH:
+ case -EADDRINUSE:
+ case -ENOBUFS:
+ /*
+diff --git a/scripts/faddr2line b/scripts/faddr2line
+index 1f5ce959f596..39e07d8574dd 100755
+--- a/scripts/faddr2line
++++ b/scripts/faddr2line
+@@ -44,9 +44,16 @@
+ set -o errexit
+ set -o nounset
+
++READELF="${CROSS_COMPILE}readelf"
++ADDR2LINE="${CROSS_COMPILE}addr2line"
++SIZE="${CROSS_COMPILE}size"
++NM="${CROSS_COMPILE}nm"
++
+ command -v awk >/dev/null 2>&1 || die "awk isn't installed"
+-command -v readelf >/dev/null 2>&1 || die "readelf isn't installed"
+-command -v addr2line >/dev/null 2>&1 || die "addr2line isn't installed"
++command -v ${READELF} >/dev/null 2>&1 || die "readelf isn't installed"
++command -v ${ADDR2LINE} >/dev/null 2>&1 || die "addr2line isn't installed"
++command -v ${SIZE} >/dev/null 2>&1 || die "size isn't installed"
++command -v ${NM} >/dev/null 2>&1 || die "nm isn't installed"
+
+ usage() {
+ echo "usage: faddr2line <object file> <func+offset> <func+offset>..." >&2
+@@ -69,10 +76,10 @@ die() {
+ find_dir_prefix() {
+ local objfile=$1
+
+- local start_kernel_addr=$(readelf -sW $objfile | awk '$8 == "start_kernel" {printf "0x%s", $2}')
++ local start_kernel_addr=$(${READELF} -sW $objfile | awk '$8 == "start_kernel" {printf "0x%s", $2}')
+ [[ -z $start_kernel_addr ]] && return
+
+- local file_line=$(addr2line -e $objfile $start_kernel_addr)
++ local file_line=$(${ADDR2LINE} -e $objfile $start_kernel_addr)
+ [[ -z $file_line ]] && return
+
+ local prefix=${file_line%init/main.c:*}
+@@ -104,7 +111,7 @@ __faddr2line() {
+
+ # Go through each of the object's symbols which match the func name.
+ # In rare cases there might be duplicates.
+- file_end=$(size -Ax $objfile | awk '$1 == ".text" {print $2}')
++ file_end=$(${SIZE} -Ax $objfile | awk '$1 == ".text" {print $2}')
+ while read symbol; do
+ local fields=($symbol)
+ local sym_base=0x${fields[0]}
+@@ -156,10 +163,10 @@ __faddr2line() {
+
+ # pass real address to addr2line
+ echo "$func+$offset/$sym_size:"
+- addr2line -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;"
++ ${ADDR2LINE} -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;"
+ DONE=1
+
+- done < <(nm -n $objfile | awk -v fn=$func -v end=$file_end '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, "0x"$1 } END {if (found == 1) print line, end; }')
++ done < <(${NM} -n $objfile | awk -v fn=$func -v end=$file_end '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, "0x"$1 } END {if (found == 1) print line, end; }')
+ }
+
+ [[ $# -lt 2 ]] && usage
+diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
+index 95209a5f8595..8daf16e1d421 100644
+--- a/security/integrity/ima/ima_policy.c
++++ b/security/integrity/ima/ima_policy.c
+@@ -743,7 +743,7 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
+ case Opt_fsuuid:
+ ima_log_string(ab, "fsuuid", args[0].from);
+
+- if (uuid_is_null(&entry->fsuuid)) {
++ if (!uuid_is_null(&entry->fsuuid)) {
+ result = -EINVAL;
+ break;
+ }
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 145e92d6ca94..b2d039537d5e 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3131,11 +3131,13 @@ static void alc256_shutup(struct hda_codec *codec)
+ if (hp_pin_sense)
+ msleep(85);
+
++ /* 3k pull low control for Headset jack. */
++ /* NOTE: call this before clearing the pin, otherwise codec stalls */
++ alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
++
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+
+- alc_update_coef_idx(codec, 0x46, 0, 3 << 12); /* 3k pull low control for Headset jack. */
+-
+ if (hp_pin_sense)
+ msleep(100);
+
+diff --git a/tools/gpio/gpio-event-mon.c b/tools/gpio/gpio-event-mon.c
+index 1c14c2595158..4b36323ea64b 100644
+--- a/tools/gpio/gpio-event-mon.c
++++ b/tools/gpio/gpio-event-mon.c
+@@ -23,6 +23,7 @@
+ #include <getopt.h>
+ #include <inttypes.h>
+ #include <sys/ioctl.h>
++#include <sys/types.h>
+ #include <linux/gpio.h>
+
+ int monitor_device(const char *device_name,
+diff --git a/tools/power/cpupower/bench/system.c b/tools/power/cpupower/bench/system.c
+index c25a74ae51ba..2bb3eef7d5c1 100644
+--- a/tools/power/cpupower/bench/system.c
++++ b/tools/power/cpupower/bench/system.c
+@@ -61,7 +61,7 @@ int set_cpufreq_governor(char *governor, unsigned int cpu)
+
+ dprintf("set %s as cpufreq governor\n", governor);
+
+- if (cpupower_is_cpu_online(cpu) != 0) {
++ if (cpupower_is_cpu_online(cpu) != 1) {
+ perror("cpufreq_cpu_exists");
+ fprintf(stderr, "error: cpu %u does not exist\n", cpu);
+ return -1;
+diff --git a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
+index 1b5da0066ebf..5b3205f16217 100644
+--- a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
++++ b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
+@@ -130,15 +130,18 @@ static struct cpuidle_monitor *cpuidle_register(void)
+ {
+ int num;
+ char *tmp;
++ int this_cpu;
++
++ this_cpu = sched_getcpu();
+
+ /* Assume idle state count is the same for all CPUs */
+- cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(0);
++ cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(this_cpu);
+
+ if (cpuidle_sysfs_monitor.hw_states_num <= 0)
+ return NULL;
+
+ for (num = 0; num < cpuidle_sysfs_monitor.hw_states_num; num++) {
+- tmp = cpuidle_state_name(0, num);
++ tmp = cpuidle_state_name(this_cpu, num);
+ if (tmp == NULL)
+ continue;
+
+@@ -146,7 +149,7 @@ static struct cpuidle_monitor *cpuidle_register(void)
+ strncpy(cpuidle_cstates[num].name, tmp, CSTATE_NAME_LEN - 1);
+ free(tmp);
+
+- tmp = cpuidle_state_desc(0, num);
++ tmp = cpuidle_state_desc(this_cpu, num);
+ if (tmp == NULL)
+ continue;
+ strncpy(cpuidle_cstates[num].desc, tmp, CSTATE_DESC_LEN - 1);
+diff --git a/tools/usb/usbip/src/usbip_bind.c b/tools/usb/usbip/src/usbip_bind.c
+index fa46141ae68b..e121cfb1746a 100644
+--- a/tools/usb/usbip/src/usbip_bind.c
++++ b/tools/usb/usbip/src/usbip_bind.c
+@@ -144,6 +144,7 @@ static int bind_device(char *busid)
+ int rc;
+ struct udev *udev;
+ struct udev_device *dev;
++ const char *devpath;
+
+ /* Check whether the device with this bus ID exists. */
+ udev = udev_new();
+@@ -152,8 +153,16 @@ static int bind_device(char *busid)
+ err("device with the specified bus ID does not exist");
+ return -1;
+ }
++ devpath = udev_device_get_devpath(dev);
+ udev_unref(udev);
+
++ /* If the device is already attached to vhci_hcd - bail out */
++ if (strstr(devpath, USBIP_VHCI_DRV_NAME)) {
++ err("bind loop detected: device: %s is attached to %s\n",
++ devpath, USBIP_VHCI_DRV_NAME);
++ return -1;
++ }
++
+ rc = unbind_other(busid);
+ if (rc == UNBIND_ST_FAILED) {
+ err("could not unbind driver from device on busid %s", busid);
+diff --git a/tools/usb/usbip/src/usbip_list.c b/tools/usb/usbip/src/usbip_list.c
+index f1b38e866dd7..d65a9f444174 100644
+--- a/tools/usb/usbip/src/usbip_list.c
++++ b/tools/usb/usbip/src/usbip_list.c
+@@ -187,6 +187,7 @@ static int list_devices(bool parsable)
+ const char *busid;
+ char product_name[128];
+ int ret = -1;
++ const char *devpath;
+
+ /* Create libudev context. */
+ udev = udev_new();
+@@ -209,6 +210,14 @@ static int list_devices(bool parsable)
+ path = udev_list_entry_get_name(dev_list_entry);
+ dev = udev_device_new_from_syspath(udev, path);
+
++ /* Ignore devices attached to vhci_hcd */
++ devpath = udev_device_get_devpath(dev);
++ if (strstr(devpath, USBIP_VHCI_DRV_NAME)) {
++ dbg("Skip the device %s already attached to %s\n",
++ devpath, USBIP_VHCI_DRV_NAME);
++ continue;
++ }
++
+ /* Get device information. */
+ idVendor = udev_device_get_sysattr_value(dev, "idVendor");
+ idProduct = udev_device_get_sysattr_value(dev, "idProduct");
+diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
+index 95cba0799828..9a07ee94a230 100644
+--- a/virt/kvm/arm/arm.c
++++ b/virt/kvm/arm/arm.c
+@@ -612,7 +612,6 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
+ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ {
+ int ret;
+- sigset_t sigsaved;
+
+ if (unlikely(!kvm_vcpu_initialized(vcpu)))
+ return -ENOEXEC;
+@@ -630,8 +629,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ if (run->immediate_exit)
+ return -EINTR;
+
+- if (vcpu->sigset_active)
+- sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
++ kvm_sigset_activate(vcpu);
+
+ ret = 1;
+ run->exit_reason = KVM_EXIT_UNKNOWN;
+@@ -753,8 +751,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ kvm_pmu_update_run(vcpu);
+ }
+
+- if (vcpu->sigset_active)
+- sigprocmask(SIG_SETMASK, &sigsaved, NULL);
++ kvm_sigset_deactivate(vcpu);
++
+ return ret;
+ }
+
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 2447d7c017e7..8401774f5aeb 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -2073,6 +2073,29 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
+ }
+ EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
+
++void kvm_sigset_activate(struct kvm_vcpu *vcpu)
++{
++ if (!vcpu->sigset_active)
++ return;
++
++ /*
++ * This does a lockless modification of ->real_blocked, which is fine
++ * because only current can change ->real_blocked, and all readers of
++ * ->real_blocked don't care as long as ->real_blocked is always a
++ * subset of ->blocked.
++ */
++ sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
++}
++
++void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
++{
++ if (!vcpu->sigset_active)
++ return;
++
++ sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
++ sigemptyset(&current->real_blocked);
++}
++
+ static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
+ {
+ unsigned int old, val, grow;