author     Mike Pagano <mpagano@gentoo.org>    2023-12-08 05:55:00 -0500
committer  Mike Pagano <mpagano@gentoo.org>    2023-12-08 05:55:00 -0500
commit     bd4a200f012c938193a86da7d829c88acee96bda (patch)
tree       4c58dca26e49e3b481f0f65158184a61ff1a834d
parent     Linux patch 6.1.65 (diff)
download   linux-patches-bd4a200f012c938193a86da7d829c88acee96bda.tar.gz
           linux-patches-bd4a200f012c938193a86da7d829c88acee96bda.tar.bz2
           linux-patches-bd4a200f012c938193a86da7d829c88acee96bda.zip
Linux patch 6.1.66
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README             |    4
-rw-r--r--  1065_linux-6.1.66.patch | 4837
2 files changed, 4841 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 08e6f2e1..37486bab 100644
--- a/0000_README
+++ b/0000_README
@@ -303,6 +303,10 @@ Patch: 1064_linux-6.1.65.patch
From: https://www.kernel.org
Desc: Linux 6.1.65
+Patch: 1065_linux-6.1.66.patch
+From: https://www.kernel.org
+Desc: Linux 6.1.66
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1065_linux-6.1.66.patch b/1065_linux-6.1.66.patch
new file mode 100644
index 00000000..eae06551
--- /dev/null
+++ b/1065_linux-6.1.66.patch
@@ -0,0 +1,4837 @@
+diff --git a/Makefile b/Makefile
+index 1646e334a647f..5d7e995d686c8 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 65
++SUBLEVEL = 66
+ EXTRAVERSION =
+ NAME = Curry Ramen
+
+diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
+index d12fdb9c05a89..eace3607fef41 100644
+--- a/arch/arm/xen/enlighten.c
++++ b/arch/arm/xen/enlighten.c
+@@ -204,7 +204,7 @@ static void xen_power_off(void)
+
+ static irqreturn_t xen_arm_callback(int irq, void *arg)
+ {
+- xen_hvm_evtchn_do_upcall();
++ xen_evtchn_do_upcall();
+ return IRQ_HANDLED;
+ }
+
+diff --git a/arch/parisc/include/asm/alternative.h b/arch/parisc/include/asm/alternative.h
+index 1ed45fd085d3b..1eb488f25b838 100644
+--- a/arch/parisc/include/asm/alternative.h
++++ b/arch/parisc/include/asm/alternative.h
+@@ -34,7 +34,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
+
+ /* Alternative SMP implementation. */
+ #define ALTERNATIVE(cond, replacement) "!0:" \
+- ".section .altinstructions, \"aw\" !" \
++ ".section .altinstructions, \"a\" !" \
++ ".align 4 !" \
+ ".word (0b-4-.) !" \
+ ".hword 1, " __stringify(cond) " !" \
+ ".word " __stringify(replacement) " !" \
+@@ -44,7 +45,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
+
+ /* to replace one single instructions by a new instruction */
+ #define ALTERNATIVE(from, to, cond, replacement)\
+- .section .altinstructions, "aw" ! \
++ .section .altinstructions, "a" ! \
++ .align 4 ! \
+ .word (from - .) ! \
+ .hword (to - from)/4, cond ! \
+ .word replacement ! \
+@@ -52,7 +54,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
+
+ /* to replace multiple instructions by new code */
+ #define ALTERNATIVE_CODE(from, num_instructions, cond, new_instr_ptr)\
+- .section .altinstructions, "aw" ! \
++ .section .altinstructions, "a" ! \
++ .align 4 ! \
+ .word (from - .) ! \
+ .hword -num_instructions, cond ! \
+ .word (new_instr_ptr - .) ! \
+diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
+index 75677b526b2bb..74d17d7e759da 100644
+--- a/arch/parisc/include/asm/assembly.h
++++ b/arch/parisc/include/asm/assembly.h
+@@ -574,6 +574,7 @@
+ */
+ #define ASM_EXCEPTIONTABLE_ENTRY(fault_addr, except_addr) \
+ .section __ex_table,"aw" ! \
++ .align 4 ! \
+ .word (fault_addr - .), (except_addr - .) ! \
+ .previous
+
+diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h
+index 4b6d60b941247..b9cad0bb4461b 100644
+--- a/arch/parisc/include/asm/bug.h
++++ b/arch/parisc/include/asm/bug.h
+@@ -28,13 +28,15 @@
+ do { \
+ asm volatile("\n" \
+ "1:\t" PARISC_BUG_BREAK_ASM "\n" \
+- "\t.pushsection __bug_table,\"aw\"\n" \
++ "\t.pushsection __bug_table,\"a\"\n" \
++ "\t.align %4\n" \
+ "2:\t" ASM_WORD_INSN "1b, %c0\n" \
+- "\t.short %c1, %c2\n" \
+- "\t.org 2b+%c3\n" \
++ "\t.short %1, %2\n" \
++ "\t.blockz %3-2*%4-2*2\n" \
+ "\t.popsection" \
+ : : "i" (__FILE__), "i" (__LINE__), \
+- "i" (0), "i" (sizeof(struct bug_entry)) ); \
++ "i" (0), "i" (sizeof(struct bug_entry)), \
++ "i" (sizeof(long)) ); \
+ unreachable(); \
+ } while(0)
+
+@@ -51,27 +53,31 @@
+ do { \
+ asm volatile("\n" \
+ "1:\t" PARISC_BUG_BREAK_ASM "\n" \
+- "\t.pushsection __bug_table,\"aw\"\n" \
++ "\t.pushsection __bug_table,\"a\"\n" \
++ "\t.align %4\n" \
+ "2:\t" ASM_WORD_INSN "1b, %c0\n" \
+- "\t.short %c1, %c2\n" \
+- "\t.org 2b+%c3\n" \
++ "\t.short %1, %2\n" \
++ "\t.blockz %3-2*%4-2*2\n" \
+ "\t.popsection" \
+ : : "i" (__FILE__), "i" (__LINE__), \
+ "i" (BUGFLAG_WARNING|(flags)), \
+- "i" (sizeof(struct bug_entry)) ); \
++ "i" (sizeof(struct bug_entry)), \
++ "i" (sizeof(long)) ); \
+ } while(0)
+ #else
+ #define __WARN_FLAGS(flags) \
+ do { \
+ asm volatile("\n" \
+ "1:\t" PARISC_BUG_BREAK_ASM "\n" \
+- "\t.pushsection __bug_table,\"aw\"\n" \
++ "\t.pushsection __bug_table,\"a\"\n" \
++ "\t.align %2\n" \
+ "2:\t" ASM_WORD_INSN "1b\n" \
+- "\t.short %c0\n" \
+- "\t.org 2b+%c1\n" \
++ "\t.short %0\n" \
++ "\t.blockz %1-%2-2\n" \
+ "\t.popsection" \
+ : : "i" (BUGFLAG_WARNING|(flags)), \
+- "i" (sizeof(struct bug_entry)) ); \
++ "i" (sizeof(struct bug_entry)), \
++ "i" (sizeof(long)) ); \
+ } while(0)
+ #endif
+
+diff --git a/arch/parisc/include/asm/jump_label.h b/arch/parisc/include/asm/jump_label.h
+index af2a598bc0f81..94428798b6aa6 100644
+--- a/arch/parisc/include/asm/jump_label.h
++++ b/arch/parisc/include/asm/jump_label.h
+@@ -15,10 +15,12 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
+ asm_volatile_goto("1:\n\t"
+ "nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
++ ".align %1\n\t"
+ ".word 1b - ., %l[l_yes] - .\n\t"
+ __stringify(ASM_ULONG_INSN) " %c0 - .\n\t"
+ ".popsection\n\t"
+- : : "i" (&((char *)key)[branch]) : : l_yes);
++ : : "i" (&((char *)key)[branch]), "i" (sizeof(long))
++ : : l_yes);
+
+ return false;
+ l_yes:
+@@ -30,10 +32,12 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool
+ asm_volatile_goto("1:\n\t"
+ "b,n %l[l_yes]\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
++ ".align %1\n\t"
+ ".word 1b - ., %l[l_yes] - .\n\t"
+ __stringify(ASM_ULONG_INSN) " %c0 - .\n\t"
+ ".popsection\n\t"
+- : : "i" (&((char *)key)[branch]) : : l_yes);
++ : : "i" (&((char *)key)[branch]), "i" (sizeof(long))
++ : : l_yes);
+
+ return false;
+ l_yes:
+diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
+index 10a061d6899cd..883a9ddbb6770 100644
+--- a/arch/parisc/include/asm/ldcw.h
++++ b/arch/parisc/include/asm/ldcw.h
+@@ -56,7 +56,7 @@
+ })
+
+ #ifdef CONFIG_SMP
+-# define __lock_aligned __section(".data..lock_aligned")
++# define __lock_aligned __section(".data..lock_aligned") __aligned(16)
+ #endif
+
+ #endif /* __PARISC_LDCW_H */
+diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
+index 2bf660eabe421..4165079898d9e 100644
+--- a/arch/parisc/include/asm/uaccess.h
++++ b/arch/parisc/include/asm/uaccess.h
+@@ -41,6 +41,7 @@ struct exception_table_entry {
+
+ #define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
+ ".section __ex_table,\"aw\"\n" \
++ ".align 4\n" \
+ ".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
+ ".previous\n"
+
+diff --git a/arch/parisc/include/uapi/asm/errno.h b/arch/parisc/include/uapi/asm/errno.h
+index 87245c584784e..8d94739d75c67 100644
+--- a/arch/parisc/include/uapi/asm/errno.h
++++ b/arch/parisc/include/uapi/asm/errno.h
+@@ -75,7 +75,6 @@
+
+ /* We now return you to your regularly scheduled HPUX. */
+
+-#define ENOSYM 215 /* symbol does not exist in executable */
+ #define ENOTSOCK 216 /* Socket operation on non-socket */
+ #define EDESTADDRREQ 217 /* Destination address required */
+ #define EMSGSIZE 218 /* Message too long */
+@@ -101,7 +100,6 @@
+ #define ETIMEDOUT 238 /* Connection timed out */
+ #define ECONNREFUSED 239 /* Connection refused */
+ #define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
+-#define EREMOTERELEASE 240 /* Remote peer released connection */
+ #define EHOSTDOWN 241 /* Host is down */
+ #define EHOSTUNREACH 242 /* No route to host */
+
+diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
+index 2769eb991f58d..ad3a3239ea74b 100644
+--- a/arch/parisc/kernel/vmlinux.lds.S
++++ b/arch/parisc/kernel/vmlinux.lds.S
+@@ -131,6 +131,7 @@ SECTIONS
+ RO_DATA(8)
+
+ /* unwind info */
++ . = ALIGN(4);
+ .PARISC.unwind : {
+ __start___unwind = .;
+ *(.PARISC.unwind)
+diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
+index f71f2bbd4de64..9854364e599c1 100644
+--- a/arch/powerpc/kernel/fpu.S
++++ b/arch/powerpc/kernel/fpu.S
+@@ -23,6 +23,15 @@
+ #include <asm/feature-fixups.h>
+
+ #ifdef CONFIG_VSX
++#define __REST_1FPVSR(n,c,base) \
++BEGIN_FTR_SECTION \
++ b 2f; \
++END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
++ REST_FPR(n,base); \
++ b 3f; \
++2: REST_VSR(n,c,base); \
++3:
++
+ #define __REST_32FPVSRS(n,c,base) \
+ BEGIN_FTR_SECTION \
+ b 2f; \
+@@ -41,9 +50,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
+ 2: SAVE_32VSRS(n,c,base); \
+ 3:
+ #else
++#define __REST_1FPVSR(n,b,base) REST_FPR(n, base)
+ #define __REST_32FPVSRS(n,b,base) REST_32FPRS(n, base)
+ #define __SAVE_32FPVSRS(n,b,base) SAVE_32FPRS(n, base)
+ #endif
++#define REST_1FPVSR(n,c,base) __REST_1FPVSR(n,__REG_##c,__REG_##base)
+ #define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base)
+ #define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base)
+
+@@ -67,6 +78,7 @@ _GLOBAL(store_fp_state)
+ SAVE_32FPVSRS(0, R4, R3)
+ mffs fr0
+ stfd fr0,FPSTATE_FPSCR(r3)
++ REST_1FPVSR(0, R4, R3)
+ blr
+ EXPORT_SYMBOL(store_fp_state)
+
+@@ -138,4 +150,5 @@ _GLOBAL(save_fpu)
+ 2: SAVE_32FPVSRS(0, R4, R6)
+ mffs fr0
+ stfd fr0,FPSTATE_FPSCR(r6)
++ REST_1FPVSR(0, R4, R6)
+ blr
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index 67da147fe34dc..f2cbad5228811 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -1163,11 +1163,11 @@ void kvmppc_save_user_regs(void)
+
+ usermsr = current->thread.regs->msr;
+
++ /* Caller has enabled FP/VEC/VSX/TM in MSR */
+ if (usermsr & MSR_FP)
+- save_fpu(current);
+-
++ __giveup_fpu(current);
+ if (usermsr & MSR_VEC)
+- save_altivec(current);
++ __giveup_altivec(current);
+
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (usermsr & MSR_TM) {
+diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
+index 5cf64740edb82..99c1e70841ea2 100644
+--- a/arch/powerpc/kernel/vector.S
++++ b/arch/powerpc/kernel/vector.S
+@@ -32,6 +32,7 @@ _GLOBAL(store_vr_state)
+ mfvscr v0
+ li r4, VRSTATE_VSCR
+ stvx v0, r4, r3
++ lvx v0, 0, r3
+ blr
+ EXPORT_SYMBOL(store_vr_state)
+
+@@ -108,6 +109,7 @@ _GLOBAL(save_altivec)
+ mfvscr v0
+ li r4,VRSTATE_VSCR
+ stvx v0,r4,r7
++ lvx v0,0,r7
+ blr
+
+ #ifdef CONFIG_VSX
+diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
+index 6c2826417b337..93c60c0c9d4a7 100644
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -294,7 +294,7 @@ static void __xen_pv_evtchn_do_upcall(struct pt_regs *regs)
+
+ inc_irq_stat(irq_hv_callback_count);
+
+- xen_hvm_evtchn_do_upcall();
++ xen_evtchn_do_upcall();
+
+ set_irq_regs(old_regs);
+ }
+diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
+index 2c6698aa218b1..abc07d0045897 100644
+--- a/arch/x86/include/asm/kvm-x86-ops.h
++++ b/arch/x86/include/asm/kvm-x86-ops.h
+@@ -106,6 +106,7 @@ KVM_X86_OP_OPTIONAL(vcpu_blocking)
+ KVM_X86_OP_OPTIONAL(vcpu_unblocking)
+ KVM_X86_OP_OPTIONAL(pi_update_irte)
+ KVM_X86_OP_OPTIONAL(pi_start_assignment)
++KVM_X86_OP_OPTIONAL(apicv_pre_state_restore)
+ KVM_X86_OP_OPTIONAL(apicv_post_state_restore)
+ KVM_X86_OP_OPTIONAL_RET0(dy_apicv_has_pending_interrupt)
+ KVM_X86_OP_OPTIONAL(set_hv_timer)
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index c1dcaa3d2d6eb..dfcdcafe3a2cd 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1603,6 +1603,7 @@ struct kvm_x86_ops {
+ int (*pi_update_irte)(struct kvm *kvm, unsigned int host_irq,
+ uint32_t guest_irq, bool set);
+ void (*pi_start_assignment)(struct kvm *kvm);
++ void (*apicv_pre_state_restore)(struct kvm_vcpu *vcpu);
+ void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
+ bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
+
+diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
+index 7517eb05bdc1a..ab348aec86632 100644
+--- a/arch/x86/kernel/apic/msi.c
++++ b/arch/x86/kernel/apic/msi.c
+@@ -55,14 +55,14 @@ msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force)
+ * caused by the non-atomic update of the address/data pair.
+ *
+ * Direct update is possible when:
+- * - The MSI is maskable (remapped MSI does not use this code path)).
+- * The quirk bit is not set in this case.
++ * - The MSI is maskable (remapped MSI does not use this code path).
++ * The reservation mode bit is set in this case.
+ * - The new vector is the same as the old vector
+ * - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up)
+ * - The interrupt is not yet started up
+ * - The new destination CPU is the same as the old destination CPU
+ */
+- if (!irqd_msi_nomask_quirk(irqd) ||
++ if (!irqd_can_reserve(irqd) ||
+ cfg->vector == old_cfg.vector ||
+ old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR ||
+ !irqd_is_started(irqd) ||
+@@ -202,8 +202,6 @@ struct irq_domain * __init native_create_pci_msi_domain(void)
+ if (!d) {
+ irq_domain_free_fwnode(fn);
+ pr_warn("Failed to initialize PCI-MSI irqdomain.\n");
+- } else {
+- d->flags |= IRQ_DOMAIN_MSI_NOMASK_QUIRK;
+ }
+ return d;
+ }
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 4dba0a84ba2f3..edcf45e312b99 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -2446,6 +2446,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
+ u64 msr_val;
+ int i;
+
++ static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);
++
+ if (!init_event) {
+ msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
+ if (kvm_vcpu_is_reset_bsp(vcpu))
+@@ -2757,6 +2759,8 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
+ struct kvm_lapic *apic = vcpu->arch.apic;
+ int r;
+
++ static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);
++
+ kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
+ /* set SPIV separately to get count of SW disabled APICs right */
+ apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 31a10d774df6d..98d732b9418f1 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -6799,7 +6799,7 @@ static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+ vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
+ }
+
+-static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
++static void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu)
+ {
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+@@ -8172,7 +8172,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
+ .set_apic_access_page_addr = vmx_set_apic_access_page_addr,
+ .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
+ .load_eoi_exitmap = vmx_load_eoi_exitmap,
+- .apicv_post_state_restore = vmx_apicv_post_state_restore,
++ .apicv_pre_state_restore = vmx_apicv_pre_state_restore,
+ .check_apicv_inhibit_reasons = vmx_check_apicv_inhibit_reasons,
+ .hwapic_irr_update = vmx_hwapic_irr_update,
+ .hwapic_isr_update = vmx_hwapic_isr_update,
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index b8db2148c07d5..3c61bb98c10e2 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -32,10 +32,13 @@ EXPORT_SYMBOL_GPL(hypercall_page);
+ * &HYPERVISOR_shared_info->vcpu_info[cpu]. See xen_hvm_init_shared_info
+ * and xen_vcpu_setup for details. By default it points to share_info->vcpu_info
+ * but during boot it is switched to point to xen_vcpu_info.
+- * The pointer is used in __xen_evtchn_do_upcall to acknowledge pending events.
++ * The pointer is used in xen_evtchn_do_upcall to acknowledge pending events.
++ * Make sure that xen_vcpu_info doesn't cross a page boundary by making it
++ * cache-line aligned (the struct is guaranteed to have a size of 64 bytes,
++ * which matches the cache line size of 64-bit x86 processors).
+ */
+ DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
+-DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
++DEFINE_PER_CPU_ALIGNED(struct vcpu_info, xen_vcpu_info);
+
+ /* Linux <-> Xen vCPU id mapping */
+ DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
+@@ -160,6 +163,7 @@ void xen_vcpu_setup(int cpu)
+ int err;
+ struct vcpu_info *vcpup;
+
++ BUILD_BUG_ON(sizeof(*vcpup) > SMP_CACHE_BYTES);
+ BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
+
+ /*
+diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c
+index c1cd28e915a3a..c66807dd02703 100644
+--- a/arch/x86/xen/enlighten_hvm.c
++++ b/arch/x86/xen/enlighten_hvm.c
+@@ -136,7 +136,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_xen_hvm_callback)
+
+ inc_irq_stat(irq_hv_callback_count);
+
+- xen_hvm_evtchn_do_upcall();
++ xen_evtchn_do_upcall();
+
+ set_irq_regs(old_regs);
+ }
+diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
+index a10903785a338..b2b2f4315b78d 100644
+--- a/arch/x86/xen/xen-ops.h
++++ b/arch/x86/xen/xen-ops.h
+@@ -21,7 +21,7 @@ extern void *xen_initial_gdt;
+ struct trap_info;
+ void xen_copy_trap_info(struct trap_info *traps);
+
+-DECLARE_PER_CPU(struct vcpu_info, xen_vcpu_info);
++DECLARE_PER_CPU_ALIGNED(struct vcpu_info, xen_vcpu_info);
+ DECLARE_PER_CPU(unsigned long, xen_cr3);
+ DECLARE_PER_CPU(unsigned long, xen_current_cr3);
+
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index e614eb3355d39..a9da2f05e6297 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -1086,9 +1086,14 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
+ * Ask the sd driver to issue START STOP UNIT on runtime suspend
+ * and resume and shutdown only. For system level suspend/resume,
+ * devices power state is handled directly by libata EH.
++ * Given that disks are always spun up on system resume, also
++ * make sure that the sd driver forces runtime suspended disks
++ * to be resumed to correctly reflect the power state of the
++ * device.
+ */
+- sdev->manage_runtime_start_stop = true;
+- sdev->manage_shutdown = true;
++ sdev->manage_runtime_start_stop = 1;
++ sdev->manage_shutdown = 1;
++ sdev->force_runtime_start_on_system_start = 1;
+ }
+
+ /*
+diff --git a/drivers/auxdisplay/hd44780_common.c b/drivers/auxdisplay/hd44780_common.c
+index 3934c2eebf33d..7cbf375b0fa5e 100644
+--- a/drivers/auxdisplay/hd44780_common.c
++++ b/drivers/auxdisplay/hd44780_common.c
+@@ -82,7 +82,15 @@ int hd44780_common_clear_display(struct charlcd *lcd)
+ hdc->write_cmd(hdc, LCD_CMD_DISPLAY_CLEAR);
+ /* datasheet says to wait 1,64 milliseconds */
+ long_sleep(2);
+- return 0;
++
++ /*
++ * The Hitachi HD44780 controller (and compatible ones) reset the DDRAM
++ * address when executing the DISPLAY_CLEAR command, thus the
++ * following call is not required. However, other controllers do not
++ * (e.g. NewHaven NHD-0220DZW-AG5), thus move the cursor to home
++ * unconditionally to support both.
++ */
++ return hd44780_common_home(lcd);
+ }
+ EXPORT_SYMBOL_GPL(hd44780_common_clear_display);
+
+diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
+index f8d2bba9173d8..edc294ee5a5bc 100644
+--- a/drivers/cpufreq/amd-pstate.c
++++ b/drivers/cpufreq/amd-pstate.c
+@@ -296,7 +296,9 @@ static int amd_pstate_target(struct cpufreq_policy *policy,
+ static unsigned int amd_pstate_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+ {
+- return amd_pstate_update_freq(policy, target_freq, true);
++ if (!amd_pstate_update_freq(policy, target_freq, true))
++ return target_freq;
++ return policy->cur;
+ }
+
+ static void amd_pstate_adjust_perf(unsigned int cpu,
+diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
+index ad4ce84931446..925fc17eaacb2 100644
+--- a/drivers/cpufreq/imx6q-cpufreq.c
++++ b/drivers/cpufreq/imx6q-cpufreq.c
+@@ -209,6 +209,14 @@ static struct cpufreq_driver imx6q_cpufreq_driver = {
+ .suspend = cpufreq_generic_suspend,
+ };
+
++static void imx6x_disable_freq_in_opp(struct device *dev, unsigned long freq)
++{
++ int ret = dev_pm_opp_disable(dev, freq);
++
++ if (ret < 0 && ret != -ENODEV)
++ dev_warn(dev, "failed to disable %ldMHz OPP\n", freq / 1000000);
++}
++
+ #define OCOTP_CFG3 0x440
+ #define OCOTP_CFG3_SPEED_SHIFT 16
+ #define OCOTP_CFG3_SPEED_1P2GHZ 0x3
+@@ -254,17 +262,15 @@ static int imx6q_opp_check_speed_grading(struct device *dev)
+ val &= 0x3;
+
+ if (val < OCOTP_CFG3_SPEED_996MHZ)
+- if (dev_pm_opp_disable(dev, 996000000))
+- dev_warn(dev, "failed to disable 996MHz OPP\n");
++ imx6x_disable_freq_in_opp(dev, 996000000);
+
+ if (of_machine_is_compatible("fsl,imx6q") ||
+ of_machine_is_compatible("fsl,imx6qp")) {
+ if (val != OCOTP_CFG3_SPEED_852MHZ)
+- if (dev_pm_opp_disable(dev, 852000000))
+- dev_warn(dev, "failed to disable 852MHz OPP\n");
++ imx6x_disable_freq_in_opp(dev, 852000000);
++
+ if (val != OCOTP_CFG3_SPEED_1P2GHZ)
+- if (dev_pm_opp_disable(dev, 1200000000))
+- dev_warn(dev, "failed to disable 1.2GHz OPP\n");
++ imx6x_disable_freq_in_opp(dev, 1200000000);
+ }
+
+ return 0;
+@@ -316,20 +322,16 @@ static int imx6ul_opp_check_speed_grading(struct device *dev)
+ val >>= OCOTP_CFG3_SPEED_SHIFT;
+ val &= 0x3;
+
+- if (of_machine_is_compatible("fsl,imx6ul")) {
++ if (of_machine_is_compatible("fsl,imx6ul"))
+ if (val != OCOTP_CFG3_6UL_SPEED_696MHZ)
+- if (dev_pm_opp_disable(dev, 696000000))
+- dev_warn(dev, "failed to disable 696MHz OPP\n");
+- }
++ imx6x_disable_freq_in_opp(dev, 696000000);
+
+ if (of_machine_is_compatible("fsl,imx6ull")) {
+- if (val != OCOTP_CFG3_6ULL_SPEED_792MHZ)
+- if (dev_pm_opp_disable(dev, 792000000))
+- dev_warn(dev, "failed to disable 792MHz OPP\n");
++ if (val < OCOTP_CFG3_6ULL_SPEED_792MHZ)
++ imx6x_disable_freq_in_opp(dev, 792000000);
+
+ if (val != OCOTP_CFG3_6ULL_SPEED_900MHZ)
+- if (dev_pm_opp_disable(dev, 900000000))
+- dev_warn(dev, "failed to disable 900MHz OPP\n");
++ imx6x_disable_freq_in_opp(dev, 900000000);
+ }
+
+ return ret;
+diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
+index f1ba71aed33c3..e78ff9333c7a3 100644
+--- a/drivers/dma-buf/dma-resv.c
++++ b/drivers/dma-buf/dma-resv.c
+@@ -296,7 +296,7 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
+
+ dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
+ if ((old->context == fence->context && old_usage >= usage &&
+- dma_fence_is_later(fence, old)) ||
++ dma_fence_is_later_or_same(fence, old)) ||
+ dma_fence_is_signaled(old)) {
+ dma_resv_list_set(fobj, i, fence, usage);
+ dma_fence_put(old);
+diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
+index adddd8c45d0c1..74bab06283b71 100644
+--- a/drivers/firewire/core-device.c
++++ b/drivers/firewire/core-device.c
+@@ -717,14 +717,11 @@ static void create_units(struct fw_device *device)
+ fw_unit_attributes,
+ &unit->attribute_group);
+
+- if (device_register(&unit->device) < 0)
+- goto skip_unit;
+-
+ fw_device_get(device);
+- continue;
+-
+- skip_unit:
+- kfree(unit);
++ if (device_register(&unit->device) < 0) {
++ put_device(&unit->device);
++ continue;
++ }
+ }
+ }
+
+diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
+index 7ad2e03afd4e5..234cd17fdee13 100644
+--- a/drivers/firewire/sbp2.c
++++ b/drivers/firewire/sbp2.c
+@@ -1519,9 +1519,9 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
+ sdev->use_10_for_rw = 1;
+
+ if (sbp2_param_exclusive_login) {
+- sdev->manage_system_start_stop = true;
+- sdev->manage_runtime_start_stop = true;
+- sdev->manage_shutdown = true;
++ sdev->manage_system_start_stop = 1;
++ sdev->manage_runtime_start_stop = 1;
++ sdev->manage_shutdown = 1;
+ }
+
+ if (sdev->type == TYPE_ROM)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 6e5bc74846952..b9983ca99eb7d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -2200,6 +2200,8 @@ retry_init:
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
++ pci_wake_from_d3(pdev, TRUE);
++
+ /*
+ * For runpm implemented via BACO, PMFW will handle the
+ * timing for BACO in and out:
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+index bc65fc1350f9a..23e7e5126eae6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+@@ -82,6 +82,10 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_4_me.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_0_4_mec.bin");
+ MODULE_FIRMWARE("amdgpu/gc_11_0_4_rlc.bin");
+
++static const struct soc15_reg_golden golden_settings_gc_11_0[] = {
++ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL, 0x20000000, 0x20000000)
++};
++
+ static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
+ {
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_GS_NGG_CLK_CTRL, 0x9fff8fff, 0x00000010),
+@@ -274,6 +278,10 @@ static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
+ default:
+ break;
+ }
++ soc15_program_register_sequence(adev,
++ golden_settings_gc_11_0,
++ (const u32)ARRAY_SIZE(golden_settings_gc_11_0));
++
+ }
+
+ static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 001932cb813dc..6d5f3c5fb4a62 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -6149,7 +6149,7 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
+ dm_new_state->underscan_enable = val;
+ ret = 0;
+ } else if (property == adev->mode_info.abm_level_property) {
+- dm_new_state->abm_level = val;
++ dm_new_state->abm_level = val ?: ABM_LEVEL_IMMEDIATE_DISABLE;
+ ret = 0;
+ }
+
+@@ -6194,7 +6194,8 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
+ *val = dm_state->underscan_enable;
+ ret = 0;
+ } else if (property == adev->mode_info.abm_level_property) {
+- *val = dm_state->abm_level;
++ *val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ?
++ dm_state->abm_level : 0;
+ ret = 0;
+ }
+
+@@ -6274,7 +6275,8 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
+ state->pbn = 0;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+- state->abm_level = amdgpu_dm_abm_level;
++ state->abm_level = amdgpu_dm_abm_level ?:
++ ABM_LEVEL_IMMEDIATE_DISABLE;
+
+ __drm_atomic_helper_connector_reset(connector, &state->base);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index f773a467fef54..7e775cec06927 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -499,9 +499,12 @@ enum dcn_zstate_support_state {
+ DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY,
+ DCN_ZSTATE_SUPPORT_DISALLOW,
+ };
+-/*
+- * For any clocks that may differ per pipe
+- * only the max is stored in this structure
++
++/**
++ * dc_clocks - DC pipe clocks
++ *
++ * For any clocks that may differ per pipe only the max is stored in this
++ * structure
+ */
+ struct dc_clocks {
+ int dispclk_khz;
+@@ -528,6 +531,16 @@ struct dc_clocks {
+ bool prev_p_state_change_support;
+ bool fclk_prev_p_state_change_support;
+ int num_ways;
++
++ /**
++ * @fw_based_mclk_switching
++ *
++ * DC has a mechanism that leverage the variable refresh rate to switch
++ * memory clock in cases that we have a large latency to achieve the
++ * memory clock change and a short vblank window. DC has some
++ * requirements to enable this feature, and this field describes if the
++ * system support or not such a feature.
++ */
+ bool fw_based_mclk_switching;
+ bool fw_based_mclk_switching_shut_down;
+ int prev_num_ways;
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+index 364ff913527d8..31c6a80c216ff 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+@@ -202,7 +202,18 @@ struct dc_stream_state {
+ bool use_vsc_sdp_for_colorimetry;
+ bool ignore_msa_timing_param;
+
++ /**
++ * @allow_freesync:
++ *
++ * It say if Freesync is enabled or not.
++ */
+ bool allow_freesync;
++
++ /**
++ * @vrr_active_variable:
++ *
++ * It describes if VRR is in use.
++ */
+ bool vrr_active_variable;
+ bool freesync_on_desktop;
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+index 7a00fe525dfba..3538973bd0c6c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+@@ -308,7 +308,10 @@ bool cm_helper_convert_to_custom_float(
+ #define NUMBER_REGIONS 32
+ #define NUMBER_SW_SEGMENTS 16
+
+-bool cm_helper_translate_curve_to_hw_format(
++#define DC_LOGGER \
++ ctx->logger
++
++bool cm_helper_translate_curve_to_hw_format(struct dc_context *ctx,
+ const struct dc_transfer_func *output_tf,
+ struct pwl_params *lut_params, bool fixpoint)
+ {
+@@ -482,10 +485,18 @@ bool cm_helper_translate_curve_to_hw_format(
+ rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
+ rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
+
++
+ if (fixpoint == true) {
+- rgb->delta_red_reg = dc_fixpt_clamp_u0d10(rgb->delta_red);
+- rgb->delta_green_reg = dc_fixpt_clamp_u0d10(rgb->delta_green);
+- rgb->delta_blue_reg = dc_fixpt_clamp_u0d10(rgb->delta_blue);
++ uint32_t red_clamp = dc_fixpt_clamp_u0d14(rgb->delta_red);
++ uint32_t green_clamp = dc_fixpt_clamp_u0d14(rgb->delta_green);
++ uint32_t blue_clamp = dc_fixpt_clamp_u0d14(rgb->delta_blue);
++
++ if (red_clamp >> 10 || green_clamp >> 10 || blue_clamp >> 10)
++ DC_LOG_WARNING("Losing delta precision while programming shaper LUT.");
++
++ rgb->delta_red_reg = red_clamp & 0x3ff;
++ rgb->delta_green_reg = green_clamp & 0x3ff;
++ rgb->delta_blue_reg = blue_clamp & 0x3ff;
+ rgb->red_reg = dc_fixpt_clamp_u0d14(rgb->red);
+ rgb->green_reg = dc_fixpt_clamp_u0d14(rgb->green);
+ rgb->blue_reg = dc_fixpt_clamp_u0d14(rgb->blue);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
+index 3b8cd7410498a..0a68b63d61260 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
+@@ -106,6 +106,7 @@ bool cm_helper_convert_to_custom_float(
+ bool fixpoint);
+
+ bool cm_helper_translate_curve_to_hw_format(
++ struct dc_context *ctx,
+ const struct dc_transfer_func *output_tf,
+ struct pwl_params *lut_params, bool fixpoint);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 3940271189632..d84579da64003 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -1867,7 +1867,7 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ /* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
+ * update.
+ */
+- else if (cm_helper_translate_curve_to_hw_format(
++ else if (cm_helper_translate_curve_to_hw_format(dc->ctx,
+ stream->out_transfer_func,
+ &dpp->regamma_params, false)) {
+ dpp->funcs->dpp_program_regamma_pwl(
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+index fbc188812ccc9..9bd6a5716cdc1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+@@ -843,7 +843,7 @@ bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ params = &stream->out_transfer_func->pwl;
+ else if (pipe_ctx->stream->out_transfer_func->type ==
+ TF_TYPE_DISTRIBUTED_POINTS &&
+- cm_helper_translate_curve_to_hw_format(
++ cm_helper_translate_curve_to_hw_format(dc->ctx,
+ stream->out_transfer_func,
+ &mpc->blender_params, false))
+ params = &mpc->blender_params;
+@@ -872,7 +872,7 @@ bool dcn20_set_blend_lut(
+ if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
+ blend_lut = &plane_state->blend_tf->pwl;
+ else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
+- cm_helper_translate_curve_to_hw_format(
++ cm_helper_translate_curve_to_hw_format(plane_state->ctx,
+ plane_state->blend_tf,
+ &dpp_base->regamma_params, false);
+ blend_lut = &dpp_base->regamma_params;
+@@ -894,7 +894,7 @@ bool dcn20_set_shaper_3dlut(
+ if (plane_state->in_shaper_func->type == TF_TYPE_HWPWL)
+ shaper_lut = &plane_state->in_shaper_func->pwl;
+ else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
+- cm_helper_translate_curve_to_hw_format(
++ cm_helper_translate_curve_to_hw_format(plane_state->ctx,
+ plane_state->in_shaper_func,
+ &dpp_base->shaper_params, true);
+ shaper_lut = &dpp_base->shaper_params;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
+index 6a3d3a0ec0a36..701c7d8bc038a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
+@@ -280,7 +280,7 @@ bool dwb3_ogam_set_input_transfer_func(
+ dwb_ogam_lut = kzalloc(sizeof(*dwb_ogam_lut), GFP_KERNEL);
+
+ if (dwb_ogam_lut) {
+- cm_helper_translate_curve_to_hw_format(
++ cm_helper_translate_curve_to_hw_format(dwbc->ctx,
+ in_transfer_func_dwb_ogam,
+ dwb_ogam_lut, false);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+index a1b312483d7f1..53262f6bc40b0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+@@ -91,8 +91,8 @@ bool dcn30_set_blend_lut(
+ return result;
+ }
+
+-static bool dcn30_set_mpc_shaper_3dlut(
+- struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream)
++static bool dcn30_set_mpc_shaper_3dlut(struct pipe_ctx *pipe_ctx,
++ const struct dc_stream_state *stream)
+ {
+ struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
+ int mpcc_id = pipe_ctx->plane_res.hubp->inst;
+@@ -104,19 +104,18 @@ static bool dcn30_set_mpc_shaper_3dlut(
+ const struct pwl_params *shaper_lut = NULL;
+ //get the shaper lut params
+ if (stream->func_shaper) {
+- if (stream->func_shaper->type == TF_TYPE_HWPWL)
++ if (stream->func_shaper->type == TF_TYPE_HWPWL) {
+ shaper_lut = &stream->func_shaper->pwl;
+- else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
+- cm_helper_translate_curve_to_hw_format(
+- stream->func_shaper,
+- &dpp_base->shaper_params, true);
++ } else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
++ cm_helper_translate_curve_to_hw_format(stream->ctx, stream->func_shaper,
++ &dpp_base->shaper_params, true);
+ shaper_lut = &dpp_base->shaper_params;
+ }
+ }
+
+ if (stream->lut3d_func &&
+- stream->lut3d_func->state.bits.initialized == 1 &&
+- stream->lut3d_func->state.bits.rmu_idx_valid == 1) {
++ stream->lut3d_func->state.bits.initialized == 1 &&
++ stream->lut3d_func->state.bits.rmu_idx_valid == 1) {
+ if (stream->lut3d_func->state.bits.rmu_mux_num == 0)
+ mpcc_id_projected = stream->lut3d_func->state.bits.mpc_rmu0_mux;
+ else if (stream->lut3d_func->state.bits.rmu_mux_num == 1)
+@@ -125,20 +124,22 @@ static bool dcn30_set_mpc_shaper_3dlut(
+ mpcc_id_projected = stream->lut3d_func->state.bits.mpc_rmu2_mux;
+ if (mpcc_id_projected != mpcc_id)
+ BREAK_TO_DEBUGGER();
+- /*find the reason why logical layer assigned a differant mpcc_id into acquire_post_bldn_3dlut*/
++ /* find the reason why logical layer assigned a different
++ * mpcc_id into acquire_post_bldn_3dlut
++ */
+ acquired_rmu = mpc->funcs->acquire_rmu(mpc, mpcc_id,
+- stream->lut3d_func->state.bits.rmu_mux_num);
++ stream->lut3d_func->state.bits.rmu_mux_num);
+ if (acquired_rmu != stream->lut3d_func->state.bits.rmu_mux_num)
+ BREAK_TO_DEBUGGER();
+- result = mpc->funcs->program_3dlut(mpc,
+- &stream->lut3d_func->lut_3d,
+- stream->lut3d_func->state.bits.rmu_mux_num);
++
++ result = mpc->funcs->program_3dlut(mpc, &stream->lut3d_func->lut_3d,
++ stream->lut3d_func->state.bits.rmu_mux_num);
+ result = mpc->funcs->program_shaper(mpc, shaper_lut,
+- stream->lut3d_func->state.bits.rmu_mux_num);
+- } else
+- /*loop through the available mux and release the requested mpcc_id*/
++ stream->lut3d_func->state.bits.rmu_mux_num);
++ } else {
++ // loop through the available mux and release the requested mpcc_id
+ mpc->funcs->release_rmu(mpc, mpcc_id);
+-
++ }
+
+ return result;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+index 503ab45b4ace3..6b8abdb5c7f89 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+@@ -884,7 +884,7 @@ static const struct dc_plane_cap plane_cap = {
+ static const struct dc_debug_options debug_defaults_drv = {
+ .disable_z10 = false,
+ .enable_z9_disable_interface = true,
+- .minimum_z8_residency_time = 2000,
++ .minimum_z8_residency_time = 2100,
+ .psr_skip_crtc_disable = true,
+ .disable_dmcu = true,
+ .force_abm_enable = false,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+index 50b3547977281..bd75d3cba0980 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+@@ -530,7 +530,7 @@ static bool dcn32_set_mpc_shaper_3dlut(
+ if (stream->func_shaper->type == TF_TYPE_HWPWL)
+ shaper_lut = &stream->func_shaper->pwl;
+ else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
+- cm_helper_translate_curve_to_hw_format(
++ cm_helper_translate_curve_to_hw_format(stream->ctx,
+ stream->func_shaper,
+ &dpp_base->shaper_params, true);
+ shaper_lut = &dpp_base->shaper_params;
+@@ -566,8 +566,7 @@ bool dcn32_set_mcm_luts(
+ if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
+ lut_params = &plane_state->blend_tf->pwl;
+ else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
+- cm_helper_translate_curve_to_hw_format(
+- plane_state->blend_tf,
++ cm3_helper_translate_curve_to_hw_format(plane_state->blend_tf,
+ &dpp_base->regamma_params, false);
+ lut_params = &dpp_base->regamma_params;
+ }
+@@ -581,8 +580,7 @@ bool dcn32_set_mcm_luts(
+ else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
+ // TODO: dpp_base replace
+ ASSERT(false);
+- cm_helper_translate_curve_to_hw_format(
+- plane_state->in_shaper_func,
++ cm3_helper_translate_curve_to_hw_format(plane_state->in_shaper_func,
+ &dpp_base->shaper_params, true);
+ lut_params = &dpp_base->shaper_params;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dc_features.h b/drivers/gpu/drm/amd/display/dc/dml/dc_features.h
+index 74e86732e3010..2cbdd75429ffd 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dc_features.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/dc_features.h
+@@ -29,6 +29,13 @@
+ #define DC__PRESENT 1
+ #define DC__PRESENT__1 1
+ #define DC__NUM_DPP 4
++
++/**
++ * @DC__VOLTAGE_STATES:
++ *
++ * Define the maximum amount of states supported by the ASIC. Every ASIC has a
++ * specific number of states; this macro defines the maximum number of states.
++ */
+ #define DC__VOLTAGE_STATES 20
+ #define DC__NUM_DPP__4 1
+ #define DC__NUM_DPP__0_PRESENT 1
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+index dbe5d2efa4a30..9d224bb2b3df6 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+@@ -948,10 +948,8 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
+ {
+ int plane_count;
+ int i;
+- unsigned int min_dst_y_next_start_us;
+
+ plane_count = 0;
+- min_dst_y_next_start_us = 0;
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (context->res_ctx.pipe_ctx[i].plane_state)
+ plane_count++;
+@@ -973,26 +971,15 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
+ else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
+ struct dc_link *link = context->streams[0]->sink->link;
+ struct dc_stream_status *stream_status = &context->stream_status[0];
+- struct dc_stream_state *current_stream = context->streams[0];
+ int minmum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
+ bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
+ bool is_pwrseq0 = link->link_index == 0;
+- bool isFreesyncVideo;
+-
+- isFreesyncVideo = current_stream->adjust.v_total_min == current_stream->adjust.v_total_max;
+- isFreesyncVideo = isFreesyncVideo && current_stream->timing.v_total < current_stream->adjust.v_total_min;
+- for (i = 0; i < dc->res_pool->pipe_count; i++) {
+- if (context->res_ctx.pipe_ctx[i].stream == current_stream && isFreesyncVideo) {
+- min_dst_y_next_start_us = context->res_ctx.pipe_ctx[i].dlg_regs.min_dst_y_next_start_us;
+- break;
+- }
+- }
+
+ /* Don't support multi-plane configurations */
+ if (stream_status->plane_count > 1)
+ return DCN_ZSTATE_SUPPORT_DISALLOW;
+
+- if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || min_dst_y_next_start_us > 5000))
++ if (is_pwrseq0 && context->bw_ctx.dml.vba.StutterPeriod > 5000.0)
+ return DCN_ZSTATE_SUPPORT_ALLOW;
+ else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
+ return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+index c89b761bcb926..85e0d1c2a9085 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+@@ -1788,6 +1788,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
+ int i, pipe_idx, vlevel_temp = 0;
+ double dcfclk = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
+ double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
++ double dram_speed_from_validation = context->bw_ctx.dml.vba.DRAMSpeed;
+ double dcfclk_from_fw_based_mclk_switching = dcfclk_from_validation;
+ bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
+ dm_dram_clock_change_unsupported;
+@@ -1921,7 +1922,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
+ }
+
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
+- min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
++ min_dram_speed_mts = dram_speed_from_validation;
+ min_dram_speed_mts_margin = 160;
+
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us =
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
+index f394b3f3922a8..0bffae95f3a29 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
+@@ -105,14 +105,39 @@ enum source_macro_tile_size {
+ enum cursor_bpp {
+ dm_cur_2bit = 0, dm_cur_32bit = 1, dm_cur_64bit = 2
+ };
++
++/**
++ * @enum clock_change_support - It represents possible reasons to change the DRAM clock.
++ *
++ * DC may change the DRAM clock during its execution, and this enum tracks all
++ * the available methods. Note that every ASIC has their specific way to deal
++ * with these clock switch.
++ */
+ enum clock_change_support {
++ /**
++ * @dm_dram_clock_change_uninitialized: If you see this, we might have
++ * a code initialization issue
++ */
+ dm_dram_clock_change_uninitialized = 0,
++
++ /**
++ * @dm_dram_clock_change_vactive: Support DRAM switch in VActive
++ */
+ dm_dram_clock_change_vactive,
++
++ /**
++ * @dm_dram_clock_change_vblank: Support DRAM switch in VBlank
++ */
+ dm_dram_clock_change_vblank,
++
+ dm_dram_clock_change_vactive_w_mall_full_frame,
+ dm_dram_clock_change_vactive_w_mall_sub_vp,
+ dm_dram_clock_change_vblank_w_mall_full_frame,
+ dm_dram_clock_change_vblank_w_mall_sub_vp,
++
++ /**
++ * @dm_dram_clock_change_unsupported: Do not support DRAM switch
++ */
+ dm_dram_clock_change_unsupported
+ };
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+index 2b34b02dbd459..81e53e67cd0b0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+@@ -419,6 +419,15 @@ struct vba_vars_st {
+ double MinPixelChunkSizeBytes;
+ unsigned int DCCMetaBufferSizeBytes;
+ // Pipe/Plane Parameters
++
++ /** @VoltageLevel:
++ * Every ASIC has a fixed number of DPM states, and some devices might
++ * have some particular voltage configuration that does not map
++ * directly to the DPM states. This field tells how many states the
++ * target device supports; even though this field combines the DPM and
++ * special SOC voltages, it mostly matches the total number of DPM
++ * states.
++ */
+ int VoltageLevel;
+ double FabricClock;
+ double DRAMSpeed;
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+index 5fa7c4772af4f..d2b9e3f83fc3b 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+@@ -115,6 +115,13 @@ struct resource_funcs {
+ int vlevel);
+ void (*update_soc_for_wm_a)(
+ struct dc *dc, struct dc_state *context);
++
++ /**
++ * @populate_dml_pipes - Populate pipe data struct
++ *
++ * Returns:
++ * Total of pipes available in the specific ASIC.
++ */
+ int (*populate_dml_pipes)(
+ struct dc *dc,
+ struct dc_state *context,
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+index cd2be729846b4..a819f0f97c5f3 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+@@ -35,6 +35,13 @@
+ ******************************************************************************/
+
+ #define MAX_AUDIOS 7
++
++/**
++ * @MAX_PIPES:
++ *
++ * Every ASIC support a fixed number of pipes; MAX_PIPES defines a large number
++ * to be used inside loops and for determining array sizes.
++ */
+ #define MAX_PIPES 6
+ #define MAX_DIG_LINK_ENCODERS 7
+ #define MAX_DWB_PIPES 1
+diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+index a21fe7b037d1f..aaabaab49809d 100644
+--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
++++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+@@ -332,6 +332,8 @@ struct dmub_srv_hw_funcs {
+ void (*setup_mailbox)(struct dmub_srv *dmub,
+ const struct dmub_region *inbox1);
+
++ uint32_t (*get_inbox1_wptr)(struct dmub_srv *dmub);
++
+ uint32_t (*get_inbox1_rptr)(struct dmub_srv *dmub);
+
+ void (*set_inbox1_wptr)(struct dmub_srv *dmub, uint32_t wptr_offset);
+@@ -590,6 +592,18 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
+ */
+ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub);
+
++/**
++ * dmub_srv_sync_inbox1() - sync sw state with hw state
++ * @dmub: the dmub service
++ *
++ * Sync sw state with hw state when resume from S0i3
++ *
++ * Return:
++ * DMUB_STATUS_OK - success
++ * DMUB_STATUS_INVALID - unspecified error
++ */
++enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub);
++
+ /**
+ * dmub_srv_cmd_queue() - queues a command to the DMUB
+ * @dmub: the dmub service
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+index a6540e27044d2..98dad0d47e72c 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+@@ -282,6 +282,11 @@ void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub,
+ REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base);
+ }
+
++uint32_t dmub_dcn20_get_inbox1_wptr(struct dmub_srv *dmub)
++{
++ return REG_READ(DMCUB_INBOX1_WPTR);
++}
++
+ uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub)
+ {
+ return REG_READ(DMCUB_INBOX1_RPTR);
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
+index c2e5831ac52cc..1df128e57ed3b 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
+@@ -202,6 +202,8 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
+ void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub,
+ const struct dmub_region *inbox1);
+
++uint32_t dmub_dcn20_get_inbox1_wptr(struct dmub_srv *dmub);
++
+ uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub);
+
+ void dmub_dcn20_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset);
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+index 89d24fb7024e2..5e952541e72d5 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+@@ -242,6 +242,11 @@ void dmub_dcn31_setup_mailbox(struct dmub_srv *dmub,
+ REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base);
+ }
+
++uint32_t dmub_dcn31_get_inbox1_wptr(struct dmub_srv *dmub)
++{
++ return REG_READ(DMCUB_INBOX1_WPTR);
++}
++
+ uint32_t dmub_dcn31_get_inbox1_rptr(struct dmub_srv *dmub)
+ {
+ return REG_READ(DMCUB_INBOX1_RPTR);
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
+index eb62410941473..89c5a948b67d5 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
+@@ -204,6 +204,8 @@ void dmub_dcn31_setup_windows(struct dmub_srv *dmub,
+ void dmub_dcn31_setup_mailbox(struct dmub_srv *dmub,
+ const struct dmub_region *inbox1);
+
++uint32_t dmub_dcn31_get_inbox1_wptr(struct dmub_srv *dmub);
++
+ uint32_t dmub_dcn31_get_inbox1_rptr(struct dmub_srv *dmub);
+
+ void dmub_dcn31_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset);
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
+index 9c20516be066c..d2f03f797279f 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
+@@ -266,6 +266,11 @@ void dmub_dcn32_setup_mailbox(struct dmub_srv *dmub,
+ REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base);
+ }
+
++uint32_t dmub_dcn32_get_inbox1_wptr(struct dmub_srv *dmub)
++{
++ return REG_READ(DMCUB_INBOX1_WPTR);
++}
++
+ uint32_t dmub_dcn32_get_inbox1_rptr(struct dmub_srv *dmub)
+ {
+ return REG_READ(DMCUB_INBOX1_RPTR);
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
+index 7d1a6eb4d6657..f15336b6e22be 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
+@@ -206,6 +206,8 @@ void dmub_dcn32_setup_windows(struct dmub_srv *dmub,
+ void dmub_dcn32_setup_mailbox(struct dmub_srv *dmub,
+ const struct dmub_region *inbox1);
+
++uint32_t dmub_dcn32_get_inbox1_wptr(struct dmub_srv *dmub);
++
+ uint32_t dmub_dcn32_get_inbox1_rptr(struct dmub_srv *dmub);
+
+ void dmub_dcn32_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset);
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+index c3327875933e9..e951fd837aa27 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+@@ -167,6 +167,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
+ funcs->backdoor_load = dmub_dcn20_backdoor_load;
+ funcs->setup_windows = dmub_dcn20_setup_windows;
+ funcs->setup_mailbox = dmub_dcn20_setup_mailbox;
++ funcs->get_inbox1_wptr = dmub_dcn20_get_inbox1_wptr;
+ funcs->get_inbox1_rptr = dmub_dcn20_get_inbox1_rptr;
+ funcs->set_inbox1_wptr = dmub_dcn20_set_inbox1_wptr;
+ funcs->is_supported = dmub_dcn20_is_supported;
+@@ -243,6 +244,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
+ funcs->backdoor_load = dmub_dcn31_backdoor_load;
+ funcs->setup_windows = dmub_dcn31_setup_windows;
+ funcs->setup_mailbox = dmub_dcn31_setup_mailbox;
++ funcs->get_inbox1_wptr = dmub_dcn31_get_inbox1_wptr;
+ funcs->get_inbox1_rptr = dmub_dcn31_get_inbox1_rptr;
+ funcs->set_inbox1_wptr = dmub_dcn31_set_inbox1_wptr;
+ funcs->setup_out_mailbox = dmub_dcn31_setup_out_mailbox;
+@@ -281,6 +283,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
+ funcs->backdoor_load_zfb_mode = dmub_dcn32_backdoor_load_zfb_mode;
+ funcs->setup_windows = dmub_dcn32_setup_windows;
+ funcs->setup_mailbox = dmub_dcn32_setup_mailbox;
++ funcs->get_inbox1_wptr = dmub_dcn32_get_inbox1_wptr;
+ funcs->get_inbox1_rptr = dmub_dcn32_get_inbox1_rptr;
+ funcs->set_inbox1_wptr = dmub_dcn32_set_inbox1_wptr;
+ funcs->setup_out_mailbox = dmub_dcn32_setup_out_mailbox;
+@@ -666,6 +669,27 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
+ return DMUB_STATUS_OK;
+ }
+
++enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub)
++{
++ if (!dmub->sw_init)
++ return DMUB_STATUS_INVALID;
++
++ if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) {
++ uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
++ uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub);
++
++ if (rptr > dmub->inbox1_rb.capacity || wptr > dmub->inbox1_rb.capacity) {
++ return DMUB_STATUS_HW_FAILURE;
++ } else {
++ dmub->inbox1_rb.rptr = rptr;
++ dmub->inbox1_rb.wrpt = wptr;
++ dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
++ }
++ }
++
++ return DMUB_STATUS_OK;
++}
++
+ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
+ {
+ if (!dmub->sw_init)
+@@ -694,6 +718,11 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
+ if (!dmub->hw_init)
+ return DMUB_STATUS_INVALID;
+
++ if (dmub->inbox1_rb.rptr > dmub->inbox1_rb.capacity ||
++ dmub->inbox1_rb.wrpt > dmub->inbox1_rb.capacity) {
++ return DMUB_STATUS_HW_FAILURE;
++ }
++
+ if (dmub_rb_push_front(&dmub->inbox1_rb, cmd))
+ return DMUB_STATUS_OK;
+
+@@ -964,6 +993,7 @@ enum dmub_status dmub_srv_wait_for_inbox0_ack(struct dmub_srv *dmub, uint32_t ti
+ ack = dmub->hw_funcs.read_inbox0_ack_register(dmub);
+ if (ack)
+ return DMUB_STATUS_OK;
++ udelay(1);
+ }
+ return DMUB_STATUS_TIMEOUT;
+ }
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_offset.h
+index c92c4b83253f8..4bff1ef8a9a64 100644
+--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_offset.h
++++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_offset.h
+@@ -6369,6 +6369,8 @@
+ #define regTCP_INVALIDATE_BASE_IDX 1
+ #define regTCP_STATUS 0x19a1
+ #define regTCP_STATUS_BASE_IDX 1
++#define regTCP_CNTL 0x19a2
++#define regTCP_CNTL_BASE_IDX 1
+ #define regTCP_CNTL2 0x19a3
+ #define regTCP_CNTL2_BASE_IDX 1
+ #define regTCP_DEBUG_INDEX 0x19a5
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+index d30ec3005ea19..cd8b0ab0112ae 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+@@ -258,8 +258,11 @@ static int aldebaran_tables_init(struct smu_context *smu)
+ }
+
+ smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
+- if (!smu_table->ecc_table)
++ if (!smu_table->ecc_table) {
++ kfree(smu_table->metrics_table);
++ kfree(smu_table->gpu_metrics_table);
+ return -ENOMEM;
++ }
+
+ return 0;
+ }
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index fc6957fddce8e..8404286302b0c 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -136,6 +136,7 @@ static const struct xpad_device {
+ { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
+ { 0x044f, 0x0f10, "Thrustmaster Modena GT Wheel", 0, XTYPE_XBOX },
+ { 0x044f, 0xb326, "Thrustmaster Gamepad GP XID", 0, XTYPE_XBOX360 },
++ { 0x03f0, 0x0495, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE },
+ { 0x045e, 0x0202, "Microsoft X-Box pad v1 (US)", 0, XTYPE_XBOX },
+ { 0x045e, 0x0285, "Microsoft X-Box pad (Japan)", 0, XTYPE_XBOX },
+ { 0x045e, 0x0287, "Microsoft Xbox Controller S", 0, XTYPE_XBOX },
+@@ -459,6 +460,7 @@ static const struct usb_device_id xpad_table[] = {
+ XPAD_XBOX360_VENDOR(0x0079), /* GPD Win 2 Controller */
+ XPAD_XBOX360_VENDOR(0x03eb), /* Wooting Keyboards (Legacy) */
+ XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster X-Box 360 controllers */
++ XPAD_XBOXONE_VENDOR(0x03f0), /* HP HyperX Xbox One Controllers */
+ XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */
+ XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft X-Box One controllers */
+ XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */
+diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
+index f800989ea0462..418af1db0192d 100644
+--- a/drivers/iommu/intel/dmar.c
++++ b/drivers/iommu/intel/dmar.c
+@@ -1495,6 +1495,15 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+ {
+ struct qi_desc desc;
+
++ /*
++ * VT-d spec, section 4.3:
++ *
++ * Software is recommended to not submit any Device-TLB invalidation
++ * requests while address remapping hardware is disabled.
++ */
++ if (!(iommu->gcmd & DMA_GCMD_TE))
++ return;
++
+ if (mask) {
+ addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
+ desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
+@@ -1560,6 +1569,15 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+ unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);
+ struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
+
++ /*
++ * VT-d spec, section 4.3:
++ *
++ * Software is recommended to not submit any Device-TLB invalidation
++ * requests while address remapping hardware is disabled.
++ */
++ if (!(iommu->gcmd & DMA_GCMD_TE))
++ return;
++
+ desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
+ QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
+ QI_DEV_IOTLB_PFSID(pfsid);
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index 5c4f5aa8e87e4..e111b35a7aff2 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -277,7 +277,7 @@ static LIST_HEAD(dmar_satc_units);
+ #define for_each_rmrr_units(rmrr) \
+ list_for_each_entry(rmrr, &dmar_rmrr_units, list)
+
+-static void dmar_remove_one_dev_info(struct device *dev);
++static void device_block_translation(struct device *dev);
+
+ int dmar_disabled = !IS_ENABLED(CONFIG_INTEL_IOMMU_DEFAULT_ON);
+ int intel_iommu_sm = IS_ENABLED(CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON);
+@@ -1418,7 +1418,7 @@ static void iommu_enable_pci_caps(struct device_domain_info *info)
+ {
+ struct pci_dev *pdev;
+
+- if (!info || !dev_is_pci(info->dev))
++ if (!dev_is_pci(info->dev))
+ return;
+
+ pdev = to_pci_dev(info->dev);
+@@ -2064,7 +2064,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
+ } else {
+ iommu_flush_write_buffer(iommu);
+ }
+- iommu_enable_pci_caps(info);
+
+ ret = 0;
+
+@@ -2494,13 +2493,6 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
+
+ /* PASID table is mandatory for a PCI device in scalable mode. */
+ if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
+- ret = intel_pasid_alloc_table(dev);
+- if (ret) {
+- dev_err(dev, "PASID table allocation failed\n");
+- dmar_remove_one_dev_info(dev);
+- return ret;
+- }
+-
+ /* Setup the PASID entry for requests without PASID: */
+ if (hw_pass_through && domain_type_is_si(domain))
+ ret = intel_pasid_setup_pass_through(iommu, domain,
+@@ -2513,7 +2505,7 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
+ dev, PASID_RID2PASID);
+ if (ret) {
+ dev_err(dev, "Setup RID2PASID failed\n");
+- dmar_remove_one_dev_info(dev);
++ device_block_translation(dev);
+ return ret;
+ }
+ }
+@@ -2521,10 +2513,13 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
+ ret = domain_context_mapping(domain, dev);
+ if (ret) {
+ dev_err(dev, "Domain context map failed\n");
+- dmar_remove_one_dev_info(dev);
++ device_block_translation(dev);
+ return ret;
+ }
+
++ if (sm_supported(info->iommu) || !domain_type_is_si(info->domain))
++ iommu_enable_pci_caps(info);
++
+ return 0;
+ }
+
+@@ -4091,8 +4086,8 @@ static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *op
+ */
+ static void domain_context_clear(struct device_domain_info *info)
+ {
+- if (!info->iommu || !info->dev || !dev_is_pci(info->dev))
+- return;
++ if (!dev_is_pci(info->dev))
++ domain_context_clear_one(info, info->bus, info->devfn);
+
+ pci_for_each_dma_alias(to_pci_dev(info->dev),
+ &domain_context_clear_one_cb, info);
+@@ -4112,7 +4107,6 @@ static void dmar_remove_one_dev_info(struct device *dev)
+
+ iommu_disable_dev_iotlb(info);
+ domain_context_clear(info);
+- intel_pasid_free_table(info->dev);
+ }
+
+ spin_lock_irqsave(&domain->lock, flags);
+@@ -4123,6 +4117,37 @@ static void dmar_remove_one_dev_info(struct device *dev)
+ info->domain = NULL;
+ }
+
++/*
++ * Clear the page table pointer in context or pasid table entries so that
++ * all DMA requests without PASID from the device are blocked. If the page
++ * table has been set, clean up the data structures.
++ */
++static void device_block_translation(struct device *dev)
++{
++ struct device_domain_info *info = dev_iommu_priv_get(dev);
++ struct intel_iommu *iommu = info->iommu;
++ unsigned long flags;
++
++ iommu_disable_dev_iotlb(info);
++ if (!dev_is_real_dma_subdevice(dev)) {
++ if (sm_supported(iommu))
++ intel_pasid_tear_down_entry(iommu, dev,
++ PASID_RID2PASID, false);
++ else
++ domain_context_clear(info);
++ }
++
++ if (!info->domain)
++ return;
++
++ spin_lock_irqsave(&info->domain->lock, flags);
++ list_del(&info->link);
++ spin_unlock_irqrestore(&info->domain->lock, flags);
++
++ domain_detach_iommu(info->domain, iommu);
++ info->domain = NULL;
++}
++
+ static int md_domain_init(struct dmar_domain *domain, int guest_width)
+ {
+ int adjust_width;
+@@ -4246,7 +4271,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+
+ if (info->domain)
+- dmar_remove_one_dev_info(dev);
++ device_block_translation(dev);
+ }
+
+ ret = prepare_domain_attach_device(domain, dev);
+@@ -4477,6 +4502,7 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
+ struct device_domain_info *info;
+ struct intel_iommu *iommu;
+ u8 bus, devfn;
++ int ret;
+
+ iommu = device_to_iommu(dev, &bus, &devfn);
+ if (!iommu || !iommu->iommu.ops)
+@@ -4521,6 +4547,16 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
+
+ dev_iommu_priv_set(dev, info);
+
++ if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
++ ret = intel_pasid_alloc_table(dev);
++ if (ret) {
++ dev_err(dev, "PASID table allocation failed\n");
++ dev_iommu_priv_set(dev, NULL);
++ kfree(info);
++ return ERR_PTR(ret);
++ }
++ }
++
+ return &iommu->iommu;
+ }
+
+@@ -4529,6 +4565,7 @@ static void intel_iommu_release_device(struct device *dev)
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+
+ dmar_remove_one_dev_info(dev);
++ intel_pasid_free_table(dev);
+ dev_iommu_priv_set(dev, NULL);
+ kfree(info);
+ set_dma_ops(dev, NULL);
+@@ -4872,7 +4909,7 @@ static void quirk_igfx_skip_te_disable(struct pci_dev *dev)
+ ver = (dev->device >> 8) & 0xff;
+ if (ver != 0x45 && ver != 0x46 && ver != 0x4c &&
+ ver != 0x4e && ver != 0x8a && ver != 0x98 &&
+- ver != 0x9a && ver != 0xa7)
++ ver != 0x9a && ver != 0xa7 && ver != 0x7d)
+ return;
+
+ if (risky_device(dev))
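
The VT-d rework above moves the PASID table allocation from domain attach into intel_iommu_probe_device() and the matching free into release_device, so the table's lifetime follows the device rather than any one domain, and detach can no longer free state a later attach still needs. A hedged sketch of that lifecycle split (hypothetical names, not the iommu API):

    #include <stdlib.h>

    /* Per-device state owned by probe/release; per-domain linkage
     * owned by attach/detach. */
    struct dev_state { void *pasid_table; void *domain; };

    static int probe_device(struct dev_state *s)
    {
        s->pasid_table = calloc(1, 4096);  /* lives as long as the device */
        return s->pasid_table ? 0 : -1;
    }

    static void release_device(struct dev_state *s)
    {
        free(s->pasid_table);              /* freed exactly once, here */
        s->pasid_table = NULL;
    }

    static void attach(struct dev_state *s, void *dom) { s->domain = dom; }
    static void detach(struct dev_state *s)            { s->domain = NULL; }
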
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 4d3595d6d1c40..05e3157fc7b4e 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -1489,7 +1489,7 @@ out_nocoalesce:
+ bch_keylist_free(&keylist);
+
+ for (i = 0; i < nodes; i++)
+- if (!IS_ERR(new_nodes[i])) {
++ if (!IS_ERR_OR_NULL(new_nodes[i])) {
+ btree_node_free(new_nodes[i]);
+ rw_unlock(true, new_nodes[i]);
+ }
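
The bcache fix matters because new_nodes[i] can legitimately be NULL on the bailout path, and IS_ERR() treats NULL as a valid pointer, so the old check would hand NULL to btree_node_free(). The kernel encodes small negative errnos in the top of the pointer range; roughly:

    #include <stdbool.h>

    #define MAX_ERRNO 4095

    static inline bool is_err(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static inline bool is_err_or_null(const void *p)
    {
        return !p || is_err(p);  /* NULL is not an ERR_PTR; test it separately */
    }
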
+diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
+index 9eb03bb224698..0304e36af329c 100644
+--- a/drivers/md/dm-verity-fec.c
++++ b/drivers/md/dm-verity-fec.c
+@@ -24,7 +24,8 @@ bool verity_fec_is_enabled(struct dm_verity *v)
+ */
+ static inline struct dm_verity_fec_io *fec_io(struct dm_verity_io *io)
+ {
+- return (struct dm_verity_fec_io *) verity_io_digest_end(io->v, io);
++ return (struct dm_verity_fec_io *)
++ ((char *)io + io->v->ti->per_io_data_size - sizeof(struct dm_verity_fec_io));
+ }
+
+ /*
+diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
+index b86d41219ba9c..24df610a2c438 100644
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -631,7 +631,6 @@ static void verity_work(struct work_struct *w)
+
+ io->in_tasklet = false;
+
+- verity_fec_init_io(io);
+ verity_finish_io(io, errno_to_blk_status(verity_verify_io(io)));
+ }
+
+@@ -657,7 +656,9 @@ static void verity_end_io(struct bio *bio)
+ struct dm_verity_io *io = bio->bi_private;
+
+ if (bio->bi_status &&
+- (!verity_fec_is_enabled(io->v) || verity_is_system_shutting_down())) {
++ (!verity_fec_is_enabled(io->v) ||
++ verity_is_system_shutting_down() ||
++ (bio->bi_opf & REQ_RAHEAD))) {
+ verity_finish_io(io, bio->bi_status);
+ return;
+ }
+@@ -779,6 +780,8 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
+ bio->bi_private = io;
+ io->iter = bio->bi_iter;
+
++ verity_fec_init_io(io);
++
+ verity_submit_prefetch(v, io);
+
+ submit_bio_noacct(bio);
+diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
+index f96f4e281ee4a..f9d522c870e61 100644
+--- a/drivers/md/dm-verity.h
++++ b/drivers/md/dm-verity.h
+@@ -115,12 +115,6 @@ static inline u8 *verity_io_want_digest(struct dm_verity *v,
+ return (u8 *)(io + 1) + v->ahash_reqsize + v->digest_size;
+ }
+
+-static inline u8 *verity_io_digest_end(struct dm_verity *v,
+- struct dm_verity_io *io)
+-{
+- return verity_io_want_digest(v, io) + v->digest_size;
+-}
+-
+ extern int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
+ struct bvec_iter *iter,
+ int (*process)(struct dm_verity *v,
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index 67a7ae9b997aa..770490234c872 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -1505,6 +1505,8 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
+ blk_mq_requeue_request(req, true);
+ else
+ __blk_mq_end_request(req, BLK_STS_OK);
++ } else if (mq->in_recovery) {
++ blk_mq_requeue_request(req, true);
+ } else {
+ blk_mq_end_request(req, BLK_STS_OK);
+ }
+diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
+index de1cc9e1ae576..df85c35a86a3b 100644
+--- a/drivers/mmc/core/core.c
++++ b/drivers/mmc/core/core.c
+@@ -552,7 +552,9 @@ int mmc_cqe_recovery(struct mmc_host *host)
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+ cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
+ cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
+- mmc_wait_for_cmd(host, &cmd, 0);
++ mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
++
++ mmc_poll_for_busy(host->card, MMC_CQE_RECOVERY_TIMEOUT, true, MMC_BUSY_IO);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = MMC_CMDQ_TASK_MGMT;
+@@ -560,10 +562,13 @@ int mmc_cqe_recovery(struct mmc_host *host)
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+ cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
+ cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
+- err = mmc_wait_for_cmd(host, &cmd, 0);
++ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+
+ host->cqe_ops->cqe_recovery_finish(host);
+
++ if (err)
++ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
++
+ mmc_retune_release(host);
+
+ return err;
+diff --git a/drivers/mmc/core/regulator.c b/drivers/mmc/core/regulator.c
+index 609201a467ef9..4dcbc2281d2b5 100644
+--- a/drivers/mmc/core/regulator.c
++++ b/drivers/mmc/core/regulator.c
+@@ -271,3 +271,44 @@ int mmc_regulator_get_supply(struct mmc_host *mmc)
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
++
++/**
++ * mmc_regulator_enable_vqmmc - enable VQMMC regulator for a host
++ * @mmc: the host to regulate
++ *
++ * Returns 0 or errno. Enables the regulator for vqmmc.
++ * Keeps track of the enable status for ensuring that calls to
++ * regulator_enable/disable are balanced.
++ */
++int mmc_regulator_enable_vqmmc(struct mmc_host *mmc)
++{
++ int ret = 0;
++
++ if (!IS_ERR(mmc->supply.vqmmc) && !mmc->vqmmc_enabled) {
++ ret = regulator_enable(mmc->supply.vqmmc);
++ if (ret < 0)
++ dev_err(mmc_dev(mmc), "enabling vqmmc regulator failed\n");
++ else
++ mmc->vqmmc_enabled = true;
++ }
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(mmc_regulator_enable_vqmmc);
++
++/**
++ * mmc_regulator_disable_vqmmc - disable VQMMC regulator for a host
++ * @mmc: the host to regulate
++ *
++ * Returns 0 or errno. Disables the regulator for vqmmc.
++ * Keeps track of the enable status for ensuring that calls to
++ * regulator_enable/disable are balanced.
++ */
++void mmc_regulator_disable_vqmmc(struct mmc_host *mmc)
++{
++ if (!IS_ERR(mmc->supply.vqmmc) && mmc->vqmmc_enabled) {
++ regulator_disable(mmc->supply.vqmmc);
++ mmc->vqmmc_enabled = false;
++ }
++}
++EXPORT_SYMBOL_GPL(mmc_regulator_disable_vqmmc);
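
These two helpers track a vqmmc_enabled flag so callers can invoke them freely without unbalancing the regulator framework's enable refcount. The same guard pattern, reduced to a self-contained sketch:

    #include <stdbool.h>

    struct supply {
        bool enabled;   /* our view; keeps enable/disable calls balanced */
        int  refcount;  /* what the underlying framework counts */
    };

    static int supply_enable(struct supply *s)
    {
        if (s->enabled)
            return 0;   /* already on: do not bump the refcount again */
        s->refcount++;
        s->enabled = true;
        return 0;
    }

    static void supply_disable(struct supply *s)
    {
        if (!s->enabled)
            return;     /* never drop a reference we did not take */
        s->refcount--;
        s->enabled = false;
    }
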
+diff --git a/drivers/mmc/host/cqhci-core.c b/drivers/mmc/host/cqhci-core.c
+index b3d7d6d8d6548..41e94cd141098 100644
+--- a/drivers/mmc/host/cqhci-core.c
++++ b/drivers/mmc/host/cqhci-core.c
+@@ -942,8 +942,8 @@ static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
+ ret = cqhci_tasks_cleared(cq_host);
+
+ if (!ret)
+- pr_debug("%s: cqhci: Failed to clear tasks\n",
+- mmc_hostname(mmc));
++ pr_warn("%s: cqhci: Failed to clear tasks\n",
++ mmc_hostname(mmc));
+
+ return ret;
+ }
+@@ -976,7 +976,7 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
+ ret = cqhci_halted(cq_host);
+
+ if (!ret)
+- pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
++ pr_warn("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
+
+ return ret;
+ }
+@@ -984,10 +984,10 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
+ /*
+ * After halting we expect to be able to use the command line. We interpret the
+ * failure to halt to mean the data lines might still be in use (and the upper
+- * layers will need to send a STOP command), so we set the timeout based on a
+- * generous command timeout.
++ * layers will need to send a STOP command), however failing to halt complicates
++ * the recovery, so set a timeout that would reasonably allow I/O to complete.
+ */
+-#define CQHCI_START_HALT_TIMEOUT 5
++#define CQHCI_START_HALT_TIMEOUT 500
+
+ static void cqhci_recovery_start(struct mmc_host *mmc)
+ {
+@@ -1075,28 +1075,28 @@ static void cqhci_recovery_finish(struct mmc_host *mmc)
+
+ ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
+
+- if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
+- ok = false;
+-
+ /*
+ * The specification contradicts itself, by saying that tasks cannot be
+ * cleared if CQHCI does not halt, but if CQHCI does not halt, it should
+ * be disabled/re-enabled, but not to disable before clearing tasks.
+ * Have a go anyway.
+ */
+- if (!ok) {
+- pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
+- cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
+- cqcfg &= ~CQHCI_ENABLE;
+- cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+- cqcfg |= CQHCI_ENABLE;
+- cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+- /* Be sure that there are no tasks */
+- ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
+- if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
+- ok = false;
+- WARN_ON(!ok);
+- }
++ if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
++ ok = false;
++
++ /* Disable to make sure tasks really are cleared */
++ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
++ cqcfg &= ~CQHCI_ENABLE;
++ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
++
++ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
++ cqcfg |= CQHCI_ENABLE;
++ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
++
++ cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
++
++ if (!ok)
++ cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT);
+
+ cqhci_recover_mrqs(cq_host);
+
+diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
+index 33d7039c19169..3b5b5c139206d 100644
+--- a/drivers/mmc/host/sdhci-pci-gli.c
++++ b/drivers/mmc/host/sdhci-pci-gli.c
+@@ -801,6 +801,32 @@ static void gl9763e_hs400_enhanced_strobe(struct mmc_host *mmc,
+ sdhci_writel(host, val, SDHCI_GLI_9763E_HS400_ES_REG);
+ }
+
++static void gl9763e_set_low_power_negotiation(struct sdhci_pci_slot *slot,
++ bool enable)
++{
++ struct pci_dev *pdev = slot->chip->pdev;
++ u32 value;
++
++ pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
++ value &= ~GLI_9763E_VHS_REV;
++ value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_W);
++ pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
++
++ pci_read_config_dword(pdev, PCIE_GLI_9763E_CFG, &value);
++
++ if (enable)
++ value &= ~GLI_9763E_CFG_LPSN_DIS;
++ else
++ value |= GLI_9763E_CFG_LPSN_DIS;
++
++ pci_write_config_dword(pdev, PCIE_GLI_9763E_CFG, value);
++
++ pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
++ value &= ~GLI_9763E_VHS_REV;
++ value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
++ pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
++}
++
+ static void sdhci_set_gl9763e_signaling(struct sdhci_host *host,
+ unsigned int timing)
+ {
+@@ -909,6 +935,9 @@ static int gl9763e_add_host(struct sdhci_pci_slot *slot)
+ if (ret)
+ goto cleanup;
+
++ /* Disable LPM negotiation to avoid entering L1 state. */
++ gl9763e_set_low_power_negotiation(slot, false);
++
+ return 0;
+
+ cleanup:
+@@ -960,31 +989,6 @@ static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
+ }
+
+ #ifdef CONFIG_PM
+-static void gl9763e_set_low_power_negotiation(struct sdhci_pci_slot *slot, bool enable)
+-{
+- struct pci_dev *pdev = slot->chip->pdev;
+- u32 value;
+-
+- pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
+- value &= ~GLI_9763E_VHS_REV;
+- value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_W);
+- pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
+-
+- pci_read_config_dword(pdev, PCIE_GLI_9763E_CFG, &value);
+-
+- if (enable)
+- value &= ~GLI_9763E_CFG_LPSN_DIS;
+- else
+- value |= GLI_9763E_CFG_LPSN_DIS;
+-
+- pci_write_config_dword(pdev, PCIE_GLI_9763E_CFG, value);
+-
+- pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
+- value &= ~GLI_9763E_VHS_REV;
+- value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
+- pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
+-}
+-
+ static int gl9763e_runtime_suspend(struct sdhci_pci_chip *chip)
+ {
+ struct sdhci_pci_slot *slot = chip->slots[0];
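
gl9763e_set_low_power_negotiation() is the usual vendor-register read-modify-write: unlock the config space (VHS_REV_W), flip one field, then relock it (VHS_REV_R). The GENMASK/FIELD_PREP arithmetic it relies on can be approximated in plain C (GCC/Clang builtin assumed; a sketch, not the kernel macros):

    #include <stdint.h>

    #define GENMASK32(h, l)  (((~0u) << (l)) & (~0u >> (31 - (h))))
    #define FIELD_PREP32(mask, val) (((val) << __builtin_ctz(mask)) & (mask))

    static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t val)
    {
        reg &= ~mask;                    /* clear the field */
        reg |= FIELD_PREP32(mask, val);  /* shift the new value into place */
        return reg;
    }

    /* e.g. rev = set_field(rev, GENMASK32(31, 28), 0x5); */
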
+diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
+index 525f979e2a974..2101b6e794c0e 100644
+--- a/drivers/mmc/host/sdhci-sprd.c
++++ b/drivers/mmc/host/sdhci-sprd.c
+@@ -405,12 +405,33 @@ static void sdhci_sprd_request_done(struct sdhci_host *host,
+ mmc_request_done(host->mmc, mrq);
+ }
+
++static void sdhci_sprd_set_power(struct sdhci_host *host, unsigned char mode,
++ unsigned short vdd)
++{
++ struct mmc_host *mmc = host->mmc;
++
++ switch (mode) {
++ case MMC_POWER_OFF:
++ mmc_regulator_set_ocr(host->mmc, mmc->supply.vmmc, 0);
++
++ mmc_regulator_disable_vqmmc(mmc);
++ break;
++ case MMC_POWER_ON:
++ mmc_regulator_enable_vqmmc(mmc);
++ break;
++ case MMC_POWER_UP:
++ mmc_regulator_set_ocr(host->mmc, mmc->supply.vmmc, vdd);
++ break;
++ }
++}
++
+ static struct sdhci_ops sdhci_sprd_ops = {
+ .read_l = sdhci_sprd_readl,
+ .write_l = sdhci_sprd_writel,
+ .write_w = sdhci_sprd_writew,
+ .write_b = sdhci_sprd_writeb,
+ .set_clock = sdhci_sprd_set_clock,
++ .set_power = sdhci_sprd_set_power,
+ .get_max_clock = sdhci_sprd_get_max_clock,
+ .get_min_clock = sdhci_sprd_get_min_clock,
+ .set_bus_width = sdhci_set_bus_width,
+@@ -676,6 +697,10 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
+ host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
+ SDHCI_SUPPORT_DDR50);
+
++ ret = mmc_regulator_get_supply(host->mmc);
++ if (ret)
++ goto pm_runtime_disable;
++
+ ret = sdhci_setup_host(host);
+ if (ret)
+ goto pm_runtime_disable;
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+index 6383d9805dac9..b58162ce81d87 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+@@ -1043,14 +1043,12 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
+ dma_addr_t addr;
+
+ buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);
+-
+- /* If there's enough room to align the FD address, do it.
+- * It will help hardware optimize accesses.
+- */
+ aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
+ DPAA2_ETH_TX_BUF_ALIGN);
+ if (aligned_start >= skb->head)
+ buffer_start = aligned_start;
++ else
++ return -ENOMEM;
+
+ /* Store a backpointer to the skb at the beginning of the buffer
+ * (in the private data area) such that we can release it
+@@ -4738,6 +4736,8 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
+ if (err)
+ goto err_dl_port_add;
+
++ net_dev->needed_headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN;
++
+ err = register_netdev(net_dev);
+ if (err < 0) {
+ dev_err(dev, "register_netdev() failed\n");
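
The dpaa2 fix stops the driver from silently using an under-aligned frame address: it reserves DPAA2_ETH_TX_BUF_ALIGN extra headroom up front and fails with -ENOMEM if the aligned start would land before skb->head. A sketch of aligning a pointer downward while respecting the buffer start (illustrative names):

    #include <stdint.h>

    /* Align p down to 'align' (a power of two), but never below 'base'.
     * Returns NULL when there is not enough headroom. */
    static unsigned char *align_down_checked(unsigned char *base,
                                             unsigned char *p,
                                             uintptr_t align)
    {
        unsigned char *aligned =
            (unsigned char *)((uintptr_t)p & ~(align - 1));
        return aligned >= base ? aligned : NULL;
    }
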
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+index 447718483ef47..e703846adc9f0 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+@@ -702,7 +702,7 @@ static inline bool dpaa2_eth_rx_pause_enabled(u64 link_options)
+
+ static inline unsigned int dpaa2_eth_needed_headroom(struct sk_buff *skb)
+ {
+- unsigned int headroom = DPAA2_ETH_SWA_SIZE;
++ unsigned int headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN;
+
+ /* If we don't have an skb (e.g. XDP buffer), we only need space for
+ * the software annotation area
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+index 11eeb36cf9a54..a0c31f5b2ce05 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+@@ -232,7 +232,7 @@ M(NPC_GET_KEX_CFG, 0x600c, npc_get_kex_cfg, \
+ M(NPC_INSTALL_FLOW, 0x600d, npc_install_flow, \
+ npc_install_flow_req, npc_install_flow_rsp) \
+ M(NPC_DELETE_FLOW, 0x600e, npc_delete_flow, \
+- npc_delete_flow_req, msg_rsp) \
++ npc_delete_flow_req, npc_delete_flow_rsp) \
+ M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \
+ npc_mcam_read_entry_req, \
+ npc_mcam_read_entry_rsp) \
+@@ -1471,6 +1471,8 @@ struct npc_install_flow_req {
+ u8 vtag0_op;
+ u16 vtag1_def;
+ u8 vtag1_op;
++ /* old counter value */
++ u16 cntr_val;
+ };
+
+ struct npc_install_flow_rsp {
+@@ -1486,6 +1488,11 @@ struct npc_delete_flow_req {
+ u8 all; /* PF + VFs */
+ };
+
++struct npc_delete_flow_rsp {
++ struct mbox_msghdr hdr;
++ u16 cntr_val;
++};
++
+ struct npc_mcam_read_entry_req {
+ struct mbox_msghdr hdr;
+ u16 entry; /* MCAM entry to read */
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index 1f3a8cf42765e..7310047136986 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -5236,6 +5236,8 @@ int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ for (idx = 0; idx < req->prof_count[layer]; idx++) {
++ if (idx == MAX_BANDPROF_PER_PFFUNC)
++ break;
+ prof_idx = req->prof_idx[layer][idx];
+ if (prof_idx >= ipolicer->band_prof.max ||
+ ipolicer->pfvf_map[prof_idx] != pcifunc)
+@@ -5249,8 +5251,6 @@ int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
+ ipolicer->pfvf_map[prof_idx] = 0x00;
+ ipolicer->match_id[prof_idx] = 0;
+ rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
+- if (idx == MAX_BANDPROF_PER_PFFUNC)
+- break;
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
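
Moving the MAX_BANDPROF_PER_PFFUNC test to the top of the loop matters because the old placement read req->prof_idx[layer][idx] first and only then noticed idx had hit the limit — an out-of-bounds read on the final iteration. Generic shape of the fix:

    #define MAX_IDX 8

    static void walk(const int *arr, int count)
    {
        for (int idx = 0; idx < count; idx++) {
            if (idx == MAX_IDX)
                break;          /* check the bound before touching arr[idx] */
            int v = arr[idx];
            (void)v;
        }
    }
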
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+index 1eb5eb29a2ba6..80d6aa3f14c11 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+@@ -1184,7 +1184,7 @@ find_rule:
+ write_req.enable_entry = (u8)enable;
+ /* if counter is available then clear and use it */
+ if (req->set_cntr && rule->has_cntr) {
+- rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), 0x00);
++ rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), req->cntr_val);
+ write_req.set_cntr = 1;
+ write_req.cntr = rule->cntr;
+ }
+@@ -1399,12 +1399,13 @@ static int npc_delete_flow(struct rvu *rvu, struct rvu_npc_mcam_rule *rule,
+
+ int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu,
+ struct npc_delete_flow_req *req,
+- struct msg_rsp *rsp)
++ struct npc_delete_flow_rsp *rsp)
+ {
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *iter, *tmp;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct list_head del_list;
++ int blkaddr;
+
+ INIT_LIST_HEAD(&del_list);
+
+@@ -1420,6 +1421,10 @@ int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu,
+ list_move_tail(&iter->list, &del_list);
+ /* single rule */
+ } else if (req->entry == iter->entry) {
++ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
++ if (blkaddr)
++ rsp->cntr_val = rvu_read64(rvu, blkaddr,
++ NPC_AF_MATCH_STATX(iter->cntr));
+ list_move_tail(&iter->list, &del_list);
+ break;
+ }
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
+index 3392487f6b47b..329b5a02914d7 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
+@@ -145,6 +145,7 @@ void rvu_switch_enable(struct rvu *rvu)
+ struct npc_mcam_alloc_entry_req alloc_req = { 0 };
+ struct npc_mcam_alloc_entry_rsp alloc_rsp = { 0 };
+ struct npc_delete_flow_req uninstall_req = { 0 };
++ struct npc_delete_flow_rsp uninstall_rsp = { 0 };
+ struct npc_mcam_free_entry_req free_req = { 0 };
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ struct msg_rsp rsp;
+@@ -184,7 +185,7 @@ void rvu_switch_enable(struct rvu *rvu)
+ uninstall_rules:
+ uninstall_req.start = rswitch->start_entry;
+ uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
+- rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp);
++ rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
+ kfree(rswitch->entry2pcifunc);
+ free_entries:
+ free_req.all = 1;
+@@ -196,6 +197,7 @@ exit:
+ void rvu_switch_disable(struct rvu *rvu)
+ {
+ struct npc_delete_flow_req uninstall_req = { 0 };
++ struct npc_delete_flow_rsp uninstall_rsp = { 0 };
+ struct npc_mcam_free_entry_req free_req = { 0 };
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ struct rvu_hwinfo *hw = rvu->hw;
+@@ -232,7 +234,7 @@ void rvu_switch_disable(struct rvu *rvu)
+ uninstall_req.start = rswitch->start_entry;
+ uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
+ free_req.all = 1;
+- rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp);
++ rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
+ rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
+ rswitch->used_entries = 0;
+ kfree(rswitch->entry2pcifunc);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+index 826f691de2595..59d8d1ba15c28 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+@@ -448,6 +448,9 @@ int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
+ aq->prof.pebs_mantissa = 0;
+ aq->prof_mask.pebs_mantissa = 0xFF;
+
++ aq->prof.hl_en = 0;
++ aq->prof_mask.hl_en = 1;
++
+ /* Fill AQ info */
+ aq->qidx = profile;
+ aq->ctype = NIX_AQ_CTYPE_BANDPROF;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+index efd66224b3dbf..44950c2542bb7 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+@@ -339,13 +339,8 @@ struct otx2_flow_config {
+ struct list_head flow_list;
+ u32 dmacflt_max_flows;
+ u16 max_flows;
+-};
+-
+-struct otx2_tc_info {
+- /* hash table to store TC offloaded flows */
+- struct rhashtable flow_table;
+- struct rhashtable_params flow_ht_params;
+- unsigned long *tc_entries_bitmap;
++ struct list_head flow_list_tc;
++ bool ntuple;
+ };
+
+ struct dev_hw_ops {
+@@ -465,7 +460,6 @@ struct otx2_nic {
+ /* NPC MCAM */
+ struct otx2_flow_config *flow_cfg;
+ struct otx2_mac_table *mac_table;
+- struct otx2_tc_info tc_info;
+
+ u64 reset_count;
+ struct work_struct reset_task;
+@@ -1024,7 +1018,8 @@ int otx2_init_tc(struct otx2_nic *nic);
+ void otx2_shutdown_tc(struct otx2_nic *nic);
+ int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data);
+-int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic);
++void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic);
++
+ /* CGX/RPM DMAC filters support */
+ int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
+ int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+index 777a27047c8e8..5f71a72f95e50 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+@@ -41,7 +41,6 @@ static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id,
+ return 0;
+
+ otx2_alloc_mcam_entries(pfvf, ctx->val.vu16);
+- otx2_tc_alloc_ent_bitmap(pfvf);
+
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+index 0eb74e8c553dd..aaf1af2a402ec 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+@@ -753,6 +753,7 @@ static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
++ pfvf->flow_cfg->ntuple = ntuple;
+ switch (nfc->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = otx2_set_rss_hash_opts(pfvf, nfc);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+index 5c4a4d3557702..5c757508322b9 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+@@ -276,6 +276,7 @@ int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
+
+ flow_cfg = pfvf->flow_cfg;
+ INIT_LIST_HEAD(&flow_cfg->flow_list);
++ INIT_LIST_HEAD(&flow_cfg->flow_list_tc);
+ flow_cfg->max_flows = 0;
+
+ return 0;
+@@ -298,6 +299,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
++ INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc);
+
+ /* Allocate bare minimum number of MCAM entries needed for
+ * unicast and ntuple filters.
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+index 1d2d72c60a12c..18c5d2b3f7f95 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+@@ -566,7 +566,9 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
+ otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr,
+ TYPE_PFVF);
+- vfs -= 64;
++ if (intr)
++ trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
++ vfs = 64;
+ }
+
+ intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
+@@ -574,7 +576,8 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
+
+ otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
+
+- trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
++ if (intr)
++ trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
+
+ return IRQ_HANDLED;
+ }
+@@ -1855,6 +1858,8 @@ int otx2_open(struct net_device *netdev)
+ if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
+ otx2_dmacflt_reinstall_flows(pf);
+
++ otx2_tc_apply_ingress_police_rules(pf);
++
+ err = otx2_rxtx_enable(pf, true);
+ /* If a mbox communication error happens at this point then interface
+ * will end up in a state such that it is in down state but hardware
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+index 1aeb18a901b13..bb77ab7ddfefd 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+@@ -48,9 +48,8 @@ struct otx2_tc_flow_stats {
+ };
+
+ struct otx2_tc_flow {
+- struct rhash_head node;
++ struct list_head list;
+ unsigned long cookie;
+- unsigned int bitpos;
+ struct rcu_head rcu;
+ struct otx2_tc_flow_stats stats;
+ spinlock_t lock; /* lock for stats */
+@@ -58,31 +57,13 @@ struct otx2_tc_flow {
+ u16 entry;
+ u16 leaf_profile;
+ bool is_act_police;
++ u32 prio;
++ struct npc_install_flow_req req;
++ u64 rate;
++ u32 burst;
++ bool is_pps;
+ };
+
+-int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic)
+-{
+- struct otx2_tc_info *tc = &nic->tc_info;
+-
+- if (!nic->flow_cfg->max_flows)
+- return 0;
+-
+- /* Max flows changed, free the existing bitmap */
+- kfree(tc->tc_entries_bitmap);
+-
+- tc->tc_entries_bitmap =
+- kcalloc(BITS_TO_LONGS(nic->flow_cfg->max_flows),
+- sizeof(long), GFP_KERNEL);
+- if (!tc->tc_entries_bitmap) {
+- netdev_err(nic->netdev,
+- "Unable to alloc TC flow entries bitmap\n");
+- return -ENOMEM;
+- }
+-
+- return 0;
+-}
+-EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap);
+-
+ static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
+ u32 *burst_exp, u32 *burst_mantissa)
+ {
+@@ -321,21 +302,10 @@ static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
+ return err;
+ }
+
+-static int otx2_tc_act_set_police(struct otx2_nic *nic,
+- struct otx2_tc_flow *node,
+- struct flow_cls_offload *f,
+- u64 rate, u32 burst, u32 mark,
+- struct npc_install_flow_req *req, bool pps)
++static int otx2_tc_act_set_hw_police(struct otx2_nic *nic,
++ struct otx2_tc_flow *node)
+ {
+- struct netlink_ext_ack *extack = f->common.extack;
+- struct otx2_hw *hw = &nic->hw;
+- int rq_idx, rc;
+-
+- rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
+- if (rq_idx >= hw->rx_queues) {
+- NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
+- return -EINVAL;
+- }
++ int rc;
+
+ mutex_lock(&nic->mbox.lock);
+
+@@ -345,23 +315,17 @@ static int otx2_tc_act_set_police(struct otx2_nic *nic,
+ return rc;
+ }
+
+- rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps);
++ rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile,
++ node->burst, node->rate, node->is_pps);
+ if (rc)
+ goto free_leaf;
+
+- rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true);
++ rc = cn10k_map_unmap_rq_policer(nic, node->rq, node->leaf_profile, true);
+ if (rc)
+ goto free_leaf;
+
+ mutex_unlock(&nic->mbox.lock);
+
+- req->match_id = mark & 0xFFFFULL;
+- req->index = rq_idx;
+- req->op = NIX_RX_ACTIONOP_UCAST;
+- set_bit(rq_idx, &nic->rq_bmap);
+- node->is_act_police = true;
+- node->rq = rq_idx;
+-
+ return 0;
+
+ free_leaf:
+@@ -373,6 +337,39 @@ free_leaf:
+ return rc;
+ }
+
++static int otx2_tc_act_set_police(struct otx2_nic *nic,
++ struct otx2_tc_flow *node,
++ struct flow_cls_offload *f,
++ u64 rate, u32 burst, u32 mark,
++ struct npc_install_flow_req *req, bool pps)
++{
++ struct netlink_ext_ack *extack = f->common.extack;
++ struct otx2_hw *hw = &nic->hw;
++ int rq_idx, rc;
++
++ rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
++ if (rq_idx >= hw->rx_queues) {
++ NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
++ return -EINVAL;
++ }
++
++ req->match_id = mark & 0xFFFFULL;
++ req->index = rq_idx;
++ req->op = NIX_RX_ACTIONOP_UCAST;
++
++ node->is_act_police = true;
++ node->rq = rq_idx;
++ node->burst = burst;
++ node->rate = rate;
++ node->is_pps = pps;
++
++ rc = otx2_tc_act_set_hw_police(nic, node);
++ if (!rc)
++ set_bit(rq_idx, &nic->rq_bmap);
++
++ return rc;
++}
++
+ static int otx2_tc_parse_actions(struct otx2_nic *nic,
+ struct flow_action *flow_action,
+ struct npc_install_flow_req *req,
+@@ -689,8 +686,117 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
+ return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
+ }
+
+-static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry)
++static void otx2_destroy_tc_flow_list(struct otx2_nic *pfvf)
++{
++ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
++ struct otx2_tc_flow *iter, *tmp;
++
++ if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
++ return;
++
++ list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list_tc, list) {
++ list_del(&iter->list);
++ kfree(iter);
++ flow_cfg->nr_flows--;
++ }
++}
++
++static struct otx2_tc_flow *otx2_tc_get_entry_by_cookie(struct otx2_flow_config *flow_cfg,
++ unsigned long cookie)
++{
++ struct otx2_tc_flow *tmp;
++
++ list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
++ if (tmp->cookie == cookie)
++ return tmp;
++ }
++
++ return NULL;
++}
++
++static struct otx2_tc_flow *otx2_tc_get_entry_by_index(struct otx2_flow_config *flow_cfg,
++ int index)
++{
++ struct otx2_tc_flow *tmp;
++ int i = 0;
++
++ list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
++ if (i == index)
++ return tmp;
++ i++;
++ }
++
++ return NULL;
++}
++
++static void otx2_tc_del_from_flow_list(struct otx2_flow_config *flow_cfg,
++ struct otx2_tc_flow *node)
++{
++ struct list_head *pos, *n;
++ struct otx2_tc_flow *tmp;
++
++ list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
++ tmp = list_entry(pos, struct otx2_tc_flow, list);
++ if (node == tmp) {
++ list_del(&node->list);
++ return;
++ }
++ }
++}
++
++static int otx2_tc_add_to_flow_list(struct otx2_flow_config *flow_cfg,
++ struct otx2_tc_flow *node)
++{
++ struct list_head *pos, *n;
++ struct otx2_tc_flow *tmp;
++ int index = 0;
++
++ /* If the flow list is empty then add the new node */
++ if (list_empty(&flow_cfg->flow_list_tc)) {
++ list_add(&node->list, &flow_cfg->flow_list_tc);
++ return index;
++ }
++
++ list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
++ tmp = list_entry(pos, struct otx2_tc_flow, list);
++ if (node->prio < tmp->prio)
++ break;
++ index++;
++ }
++
++ list_add(&node->list, pos->prev);
++ return index;
++}
++
++static int otx2_add_mcam_flow_entry(struct otx2_nic *nic, struct npc_install_flow_req *req)
++{
++ struct npc_install_flow_req *tmp_req;
++ int err;
++
++ mutex_lock(&nic->mbox.lock);
++ tmp_req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
++ if (!tmp_req) {
++ mutex_unlock(&nic->mbox.lock);
++ return -ENOMEM;
++ }
++
++ memcpy(tmp_req, req, sizeof(struct npc_install_flow_req));
++ /* Send message to AF */
++ err = otx2_sync_mbox_msg(&nic->mbox);
++ if (err) {
++ netdev_err(nic->netdev, "Failed to install MCAM flow entry %d\n",
++ req->entry);
++ mutex_unlock(&nic->mbox.lock);
++ return -EFAULT;
++ }
++
++ mutex_unlock(&nic->mbox.lock);
++ return 0;
++}
++
++static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry, u16 *cntr_val)
+ {
++ struct npc_delete_flow_rsp *rsp;
+ struct npc_delete_flow_req *req;
+ int err;
+
+@@ -711,22 +817,113 @@ static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry)
+ mutex_unlock(&nic->mbox.lock);
+ return -EFAULT;
+ }
++
++ if (cntr_val) {
++ rsp = (struct npc_delete_flow_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
++ 0, &req->hdr);
++ if (IS_ERR(rsp)) {
++ netdev_err(nic->netdev, "Failed to get MCAM delete response for entry %d\n",
++ entry);
++ mutex_unlock(&nic->mbox.lock);
++ return -EFAULT;
++ }
++
++ *cntr_val = rsp->cntr_val;
++ }
++
+ mutex_unlock(&nic->mbox.lock);
++ return 0;
++}
++
++static int otx2_tc_update_mcam_table_del_req(struct otx2_nic *nic,
++ struct otx2_flow_config *flow_cfg,
++ struct otx2_tc_flow *node)
++{
++ struct list_head *pos, *n;
++ struct otx2_tc_flow *tmp;
++ int i = 0, index = 0;
++ u16 cntr_val = 0;
++
++ /* Find and delete the entry from the list and re-install
++ * all the entries from beginning to the index of the
++ * deleted entry to higher mcam indexes.
++ */
++ list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
++ tmp = list_entry(pos, struct otx2_tc_flow, list);
++ if (node == tmp) {
++ list_del(&tmp->list);
++ break;
++ }
++
++ otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
++ tmp->entry++;
++ tmp->req.entry = tmp->entry;
++ tmp->req.cntr_val = cntr_val;
++ index++;
++ }
++
++ list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
++ if (i == index)
++ break;
++
++ tmp = list_entry(pos, struct otx2_tc_flow, list);
++ otx2_add_mcam_flow_entry(nic, &tmp->req);
++ i++;
++ }
+
+ return 0;
+ }
+
++static int otx2_tc_update_mcam_table_add_req(struct otx2_nic *nic,
++ struct otx2_flow_config *flow_cfg,
++ struct otx2_tc_flow *node)
++{
++ int mcam_idx = flow_cfg->max_flows - flow_cfg->nr_flows - 1;
++ struct otx2_tc_flow *tmp;
++ int list_idx, i;
++ u16 cntr_val = 0;
++
++ /* Find the index of the entry(list_idx) whose priority
++ * is greater than the new entry and re-install all
++ * the entries from beginning to list_idx to higher
++ * mcam indexes.
++ */
++ list_idx = otx2_tc_add_to_flow_list(flow_cfg, node);
++ for (i = 0; i < list_idx; i++) {
++ tmp = otx2_tc_get_entry_by_index(flow_cfg, i);
++ if (!tmp)
++ return -ENOMEM;
++
++ otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
++ tmp->entry = flow_cfg->flow_ent[mcam_idx];
++ tmp->req.entry = tmp->entry;
++ tmp->req.cntr_val = cntr_val;
++ otx2_add_mcam_flow_entry(nic, &tmp->req);
++ mcam_idx++;
++ }
++
++ return mcam_idx;
++}
++
++static int otx2_tc_update_mcam_table(struct otx2_nic *nic,
++ struct otx2_flow_config *flow_cfg,
++ struct otx2_tc_flow *node,
++ bool add_req)
++{
++ if (add_req)
++ return otx2_tc_update_mcam_table_add_req(nic, flow_cfg, node);
++
++ return otx2_tc_update_mcam_table_del_req(nic, flow_cfg, node);
++}
++
+ static int otx2_tc_del_flow(struct otx2_nic *nic,
+ struct flow_cls_offload *tc_flow_cmd)
+ {
+ struct otx2_flow_config *flow_cfg = nic->flow_cfg;
+- struct otx2_tc_info *tc_info = &nic->tc_info;
+ struct otx2_tc_flow *flow_node;
+ int err;
+
+- flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
+- &tc_flow_cmd->cookie,
+- tc_info->flow_ht_params);
++ flow_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie);
+ if (!flow_node) {
+ netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n",
+ tc_flow_cmd->cookie);
+@@ -734,6 +931,11 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
+ }
+
+ if (flow_node->is_act_police) {
++ __clear_bit(flow_node->rq, &nic->rq_bmap);
++
++ if (nic->flags & OTX2_FLAG_INTF_DOWN)
++ goto free_mcam_flow;
++
+ mutex_lock(&nic->mbox.lock);
+
+ err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
+@@ -749,21 +951,14 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
+ "Unable to free leaf bandwidth profile(%d)\n",
+ flow_node->leaf_profile);
+
+- __clear_bit(flow_node->rq, &nic->rq_bmap);
+-
+ mutex_unlock(&nic->mbox.lock);
+ }
+
+- otx2_del_mcam_flow_entry(nic, flow_node->entry);
+-
+- WARN_ON(rhashtable_remove_fast(&nic->tc_info.flow_table,
+- &flow_node->node,
+- nic->tc_info.flow_ht_params));
++free_mcam_flow:
++ otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL);
++ otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false);
+ kfree_rcu(flow_node, rcu);
+-
+- clear_bit(flow_node->bitpos, tc_info->tc_entries_bitmap);
+ flow_cfg->nr_flows--;
+-
+ return 0;
+ }
+
+@@ -772,15 +967,19 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
+ {
+ struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
+ struct otx2_flow_config *flow_cfg = nic->flow_cfg;
+- struct otx2_tc_info *tc_info = &nic->tc_info;
+ struct otx2_tc_flow *new_node, *old_node;
+ struct npc_install_flow_req *req, dummy;
+- int rc, err;
++ int rc, err, mcam_idx;
+
+ if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
+ return -ENOMEM;
+
+- if (bitmap_full(tc_info->tc_entries_bitmap, flow_cfg->max_flows)) {
++ if (nic->flags & OTX2_FLAG_INTF_DOWN) {
++ NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
++ return -EINVAL;
++ }
++
++ if (flow_cfg->nr_flows == flow_cfg->max_flows) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Free MCAM entry not available to add the flow");
+ return -ENOMEM;
+@@ -792,6 +991,7 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
+ return -ENOMEM;
+ spin_lock_init(&new_node->lock);
+ new_node->cookie = tc_flow_cmd->cookie;
++ new_node->prio = tc_flow_cmd->common.prio;
+
+ memset(&dummy, 0, sizeof(struct npc_install_flow_req));
+
+@@ -802,12 +1002,11 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
+ }
+
+ /* If a flow exists with the same cookie, delete it */
+- old_node = rhashtable_lookup_fast(&tc_info->flow_table,
+- &tc_flow_cmd->cookie,
+- tc_info->flow_ht_params);
++ old_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie);
+ if (old_node)
+ otx2_tc_del_flow(nic, tc_flow_cmd);
+
++ mcam_idx = otx2_tc_update_mcam_table(nic, flow_cfg, new_node, true);
+ mutex_lock(&nic->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
+ if (!req) {
+@@ -818,11 +1017,8 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
+
+ memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr));
+ memcpy(req, &dummy, sizeof(struct npc_install_flow_req));
+-
+- new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap,
+- flow_cfg->max_flows);
+ req->channel = nic->hw.rx_chan_base;
+- req->entry = flow_cfg->flow_ent[flow_cfg->max_flows - new_node->bitpos - 1];
++ req->entry = flow_cfg->flow_ent[mcam_idx];
+ req->intf = NIX_INTF_RX;
+ req->set_cntr = 1;
+ new_node->entry = req->entry;
+@@ -832,26 +1028,18 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry");
+ mutex_unlock(&nic->mbox.lock);
+- kfree_rcu(new_node, rcu);
+ goto free_leaf;
+ }
+- mutex_unlock(&nic->mbox.lock);
+
+- /* add new flow to flow-table */
+- rc = rhashtable_insert_fast(&nic->tc_info.flow_table, &new_node->node,
+- nic->tc_info.flow_ht_params);
+- if (rc) {
+- otx2_del_mcam_flow_entry(nic, req->entry);
+- kfree_rcu(new_node, rcu);
+- goto free_leaf;
+- }
++ mutex_unlock(&nic->mbox.lock);
++ memcpy(&new_node->req, req, sizeof(struct npc_install_flow_req));
+
+- set_bit(new_node->bitpos, tc_info->tc_entries_bitmap);
+ flow_cfg->nr_flows++;
+-
+ return 0;
+
+ free_leaf:
++ otx2_tc_del_from_flow_list(flow_cfg, new_node);
++ kfree_rcu(new_node, rcu);
+ if (new_node->is_act_police) {
+ mutex_lock(&nic->mbox.lock);
+
+@@ -878,16 +1066,13 @@ free_leaf:
+ static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
+ struct flow_cls_offload *tc_flow_cmd)
+ {
+- struct otx2_tc_info *tc_info = &nic->tc_info;
+ struct npc_mcam_get_stats_req *req;
+ struct npc_mcam_get_stats_rsp *rsp;
+ struct otx2_tc_flow_stats *stats;
+ struct otx2_tc_flow *flow_node;
+ int err;
+
+- flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
+- &tc_flow_cmd->cookie,
+- tc_info->flow_ht_params);
++ flow_node = otx2_tc_get_entry_by_cookie(nic->flow_cfg, tc_flow_cmd->cookie);
+ if (!flow_node) {
+ netdev_info(nic->netdev, "tc flow not found for cookie %lx",
+ tc_flow_cmd->cookie);
+@@ -1035,12 +1220,20 @@ static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+ {
+ struct otx2_nic *nic = cb_priv;
++ bool ntuple;
+
+ if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
+ return -EOPNOTSUPP;
+
++ ntuple = nic->netdev->features & NETIF_F_NTUPLE;
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
++ if (ntuple) {
++ netdev_warn(nic->netdev,
++ "Can't install TC flower offload rule when NTUPLE is active");
++ return -EOPNOTSUPP;
++ }
++
+ return otx2_setup_tc_cls_flower(nic, type_data);
+ case TC_SETUP_CLSMATCHALL:
+ return otx2_setup_tc_ingress_matchall(nic, type_data);
+@@ -1123,18 +1316,8 @@ int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ }
+ EXPORT_SYMBOL(otx2_setup_tc);
+
+-static const struct rhashtable_params tc_flow_ht_params = {
+- .head_offset = offsetof(struct otx2_tc_flow, node),
+- .key_offset = offsetof(struct otx2_tc_flow, cookie),
+- .key_len = sizeof(((struct otx2_tc_flow *)0)->cookie),
+- .automatic_shrinking = true,
+-};
+-
+ int otx2_init_tc(struct otx2_nic *nic)
+ {
+- struct otx2_tc_info *tc = &nic->tc_info;
+- int err;
+-
+ /* Exclude receive queue 0 being used for police action */
+ set_bit(0, &nic->rq_bmap);
+
+@@ -1144,25 +1327,54 @@ int otx2_init_tc(struct otx2_nic *nic)
+ return -EINVAL;
+ }
+
+- err = otx2_tc_alloc_ent_bitmap(nic);
+- if (err)
+- return err;
+-
+- tc->flow_ht_params = tc_flow_ht_params;
+- err = rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
+- if (err) {
+- kfree(tc->tc_entries_bitmap);
+- tc->tc_entries_bitmap = NULL;
+- }
+- return err;
++ return 0;
+ }
+ EXPORT_SYMBOL(otx2_init_tc);
+
+ void otx2_shutdown_tc(struct otx2_nic *nic)
+ {
+- struct otx2_tc_info *tc = &nic->tc_info;
+-
+- kfree(tc->tc_entries_bitmap);
+- rhashtable_destroy(&tc->flow_table);
++ otx2_destroy_tc_flow_list(nic);
+ }
+ EXPORT_SYMBOL(otx2_shutdown_tc);
++
++static void otx2_tc_config_ingress_rule(struct otx2_nic *nic,
++ struct otx2_tc_flow *node)
++{
++ struct npc_install_flow_req *req;
++
++ if (otx2_tc_act_set_hw_police(nic, node))
++ return;
++
++ mutex_lock(&nic->mbox.lock);
++
++ req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
++ if (!req)
++ goto err;
++
++ memcpy(req, &node->req, sizeof(struct npc_install_flow_req));
++
++ if (otx2_sync_mbox_msg(&nic->mbox))
++ netdev_err(nic->netdev,
++ "Failed to install MCAM flow entry for ingress rule");
++err:
++ mutex_unlock(&nic->mbox.lock);
++}
++
++void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic)
++{
++ struct otx2_flow_config *flow_cfg = nic->flow_cfg;
++ struct otx2_tc_flow *node;
++
++ /* If any ingress policer rules exist for the interface then
++ * apply those rules. Ingress policer rules depend on bandwidth
++ * profiles linked to the receive queues. Since no receive queues
++ * exist when interface is down, ingress policer rules are stored
++ * and configured in hardware after all receive queues are allocated
++ * in otx2_open.
++ */
++ list_for_each_entry(node, &flow_cfg->flow_list_tc, list) {
++ if (node->is_act_police)
++ otx2_tc_config_ingress_rule(nic, node);
++ }
++}
++EXPORT_SYMBOL(otx2_tc_apply_ingress_police_rules);
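
The replacement for the rhashtable keeps the TC flow list sorted by priority, and otx2_tc_add_to_flow_list() returns the position the node landed at so the MCAM re-install logic can shuffle entries. A self-contained sketch of priority-ordered insertion (simplified to a singly linked list; equal priorities stay in arrival order, as in the driver):

    struct flow {
        unsigned int prio;
        struct flow *next;
    };

    /* Insert 'node' before the first entry with a higher priority value;
     * returns the zero-based index it was inserted at. */
    static int flow_list_add(struct flow **head, struct flow *node)
    {
        struct flow **pos = head;
        int index = 0;

        while (*pos && (*pos)->prio <= node->prio) {
            pos = &(*pos)->next;
            index++;
        }
        node->next = *pos;
        *pos = node;
        return index;
    }
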
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 770391cefb4e4..abfa375b08878 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -576,6 +576,8 @@ struct rtl8169_tc_offsets {
+ enum rtl_flag {
+ RTL_FLAG_TASK_ENABLED = 0,
+ RTL_FLAG_TASK_RESET_PENDING,
++ RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE,
++ RTL_FLAG_TASK_TX_TIMEOUT,
+ RTL_FLAG_MAX
+ };
+
+@@ -3943,7 +3945,7 @@ static void rtl8169_tx_timeout(struct net_device *dev, unsigned int txqueue)
+ {
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+- rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
++ rtl_schedule_task(tp, RTL_FLAG_TASK_TX_TIMEOUT);
+ }
+
+ static int rtl8169_tx_map(struct rtl8169_private *tp, const u32 *opts, u32 len,
+@@ -4537,6 +4539,7 @@ static void rtl_task(struct work_struct *work)
+ {
+ struct rtl8169_private *tp =
+ container_of(work, struct rtl8169_private, wk.work);
++ int ret;
+
+ rtnl_lock();
+
+@@ -4544,9 +4547,21 @@ static void rtl_task(struct work_struct *work)
+ !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
+ goto out_unlock;
+
++ if (test_and_clear_bit(RTL_FLAG_TASK_TX_TIMEOUT, tp->wk.flags)) {
++ /* ASPM compatibility issues are a typical reason for tx timeouts */
++ ret = pci_disable_link_state(tp->pci_dev, PCIE_LINK_STATE_L1 |
++ PCIE_LINK_STATE_L0S);
++ if (!ret)
++ netdev_warn_once(tp->dev, "ASPM disabled on Tx timeout\n");
++ goto reset;
++ }
++
+ if (test_and_clear_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags)) {
++reset:
+ rtl_reset_work(tp);
+ netif_wake_queue(tp->dev);
++ } else if (test_and_clear_bit(RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, tp->wk.flags)) {
++ rtl_reset_work(tp);
+ }
+ out_unlock:
+ rtnl_unlock();
+@@ -4580,7 +4595,7 @@ static void r8169_phylink_handler(struct net_device *ndev)
+ } else {
+ /* In few cases rx is broken after link-down otherwise */
+ if (rtl_is_8125(tp))
+- rtl_reset_work(tp);
++ rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE);
+ pm_runtime_idle(d);
+ }
+
+@@ -4656,7 +4671,7 @@ static int rtl8169_close(struct net_device *dev)
+ rtl8169_down(tp);
+ rtl8169_rx_clear(tp);
+
+- cancel_work_sync(&tp->wk.work);
++ cancel_work(&tp->wk.work);
+
+ free_irq(tp->irq, tp);
+
+@@ -4890,6 +4905,8 @@ static void rtl_remove_one(struct pci_dev *pdev)
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_get_noresume(&pdev->dev);
+
++ cancel_work_sync(&tp->wk.work);
++
+ unregister_netdev(tp->dev);
+
+ if (tp->dash_type != RTL_DASH_NONE)
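
The r8169 change gives tx timeouts their own RTL_FLAG_TASK_TX_TIMEOUT bit so the work function can disable ASPM before falling through to the shared reset path, and close() now uses cancel_work() while the synchronous cancel moves to rtl_remove_one() — close runs under RTNL, which rtl_task also takes, so waiting there could deadlock. A sketch of the test-and-clear flag dispatch with C11 atomics (flag names illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    enum { FLAG_RESET = 1u << 0, FLAG_TX_TIMEOUT = 1u << 1 };

    static bool test_and_clear(atomic_uint *flags, unsigned int bit)
    {
        return atomic_fetch_and(flags, ~bit) & bit;
    }

    static void task(atomic_uint *flags)
    {
        if (test_and_clear(flags, FLAG_TX_TIMEOUT)) {
            /* tx-timeout-only prep work (e.g. disable ASPM) goes here */
            goto reset;
        }
        if (test_and_clear(flags, FLAG_RESET)) {
    reset:
            /* shared reset path */;
        }
    }
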
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index 9a52283d77544..68cb5616ef991 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -517,6 +517,15 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
+ {
+ struct ravb_private *priv = netdev_priv(ndev);
+
++ if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
++ ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35);
++ ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0);
++ } else {
++ ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_RGMII, CXR35);
++ ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1,
++ CXR31_SEL_LINK0);
++ }
++
+ /* Receive frame limit set register */
+ ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR);
+
+@@ -539,14 +548,6 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
+
+ /* E-MAC interrupt enable register */
+ ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);
+-
+- if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
+- ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0);
+- ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35);
+- } else {
+- ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1,
+- CXR31_SEL_LINK0);
+- }
+ }
+
+ static void ravb_emac_init_rcar(struct net_device *ndev)
+@@ -1827,19 +1828,20 @@ static int ravb_open(struct net_device *ndev)
+ if (info->gptp)
+ ravb_ptp_init(ndev, priv->pdev);
+
+- netif_tx_start_all_queues(ndev);
+-
+ /* PHY control start */
+ error = ravb_phy_start(ndev);
+ if (error)
+ goto out_ptp_stop;
+
++ netif_tx_start_all_queues(ndev);
++
+ return 0;
+
+ out_ptp_stop:
+ /* Stop PTP Clock driver */
+ if (info->gptp)
+ ravb_ptp_stop(ndev);
++ ravb_stop_dma(ndev);
+ out_free_irq_mgmta:
+ if (!info->multi_irqs)
+ goto out_free_irq;
+@@ -1890,6 +1892,12 @@ static void ravb_tx_timeout_work(struct work_struct *work)
+ struct net_device *ndev = priv->ndev;
+ int error;
+
++ if (!rtnl_trylock()) {
++ usleep_range(1000, 2000);
++ schedule_work(&priv->work);
++ return;
++ }
++
+ netif_tx_stop_all_queues(ndev);
+
+ /* Stop PTP Clock driver */
+@@ -1923,7 +1931,7 @@ static void ravb_tx_timeout_work(struct work_struct *work)
+ */
+ netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
+ __func__, error);
+- return;
++ goto out_unlock;
+ }
+ ravb_emac_init(ndev);
+
+@@ -1933,6 +1941,9 @@ out:
+ ravb_ptp_init(ndev, priv->pdev);
+
+ netif_tx_start_all_queues(ndev);
++
++out_unlock:
++ rtnl_unlock();
+ }
+
+ /* Packet transmit function for Ethernet AVB */
+@@ -2661,9 +2672,14 @@ static int ravb_probe(struct platform_device *pdev)
+ ndev->features = info->net_features;
+ ndev->hw_features = info->net_hw_features;
+
+- reset_control_deassert(rstc);
++ error = reset_control_deassert(rstc);
++ if (error)
++ goto out_free_netdev;
++
+ pm_runtime_enable(&pdev->dev);
+- pm_runtime_get_sync(&pdev->dev);
++ error = pm_runtime_resume_and_get(&pdev->dev);
++ if (error < 0)
++ goto out_rpm_disable;
+
+ if (info->multi_irqs) {
+ if (info->err_mgmt_irqs)
+@@ -2888,11 +2904,12 @@ out_disable_gptp_clk:
+ out_disable_refclk:
+ clk_disable_unprepare(priv->refclk);
+ out_release:
+- free_netdev(ndev);
+-
+ pm_runtime_put(&pdev->dev);
++out_rpm_disable:
+ pm_runtime_disable(&pdev->dev);
+ reset_control_assert(rstc);
++out_free_netdev:
++ free_netdev(ndev);
+ return error;
+ }
+
+@@ -2902,22 +2919,26 @@ static int ravb_remove(struct platform_device *pdev)
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+
+- /* Stop PTP Clock driver */
+- if (info->ccc_gac)
+- ravb_ptp_stop(ndev);
+-
+- clk_disable_unprepare(priv->gptp_clk);
+- clk_disable_unprepare(priv->refclk);
+-
+- /* Set reset mode */
+- ravb_write(ndev, CCC_OPC_RESET, CCC);
+ unregister_netdev(ndev);
+ if (info->nc_queues)
+ netif_napi_del(&priv->napi[RAVB_NC]);
+ netif_napi_del(&priv->napi[RAVB_BE]);
++
+ ravb_mdio_release(priv);
++
++ /* Stop PTP Clock driver */
++ if (info->ccc_gac)
++ ravb_ptp_stop(ndev);
++
+ dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
+ priv->desc_bat_dma);
++
++ /* Set reset mode */
++ ravb_write(ndev, CCC_OPC_RESET, CCC);
++
++ clk_disable_unprepare(priv->gptp_clk);
++ clk_disable_unprepare(priv->refclk);
++
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ reset_control_assert(priv->rstc);
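The ravb_tx_timeout_work() hunks above switch the worker to a try-lock-and-rearm idiom so it never blocks waiting for RTNL, which can deadlock if another RTNL holder is waiting for this very work item to finish. A minimal standalone sketch of the idiom (my_priv and my_timeout_work are hypothetical names; priv->work is assumed to be the queued work item):

static void my_timeout_work(struct work_struct *work)
{
	struct my_priv *priv = container_of(work, struct my_priv, work);

	if (!rtnl_trylock()) {
		/* RTNL held elsewhere: back off briefly and requeue */
		usleep_range(1000, 2000);
		schedule_work(&priv->work);
		return;
	}

	/* ... reset the device under RTNL ... */

	rtnl_unlock();
}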
+diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+index ea4910ae0921a..6a7c1d325c464 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+@@ -177,8 +177,10 @@
+ #define MMC_XGMAC_RX_DISCARD_OCT_GB 0x1b4
+ #define MMC_XGMAC_RX_ALIGN_ERR_PKT 0x1bc
+
++#define MMC_XGMAC_TX_FPE_INTR_MASK 0x204
+ #define MMC_XGMAC_TX_FPE_FRAG 0x208
+ #define MMC_XGMAC_TX_HOLD_REQ 0x20c
++#define MMC_XGMAC_RX_FPE_INTR_MASK 0x224
+ #define MMC_XGMAC_RX_PKT_ASSEMBLY_ERR 0x228
+ #define MMC_XGMAC_RX_PKT_SMD_ERR 0x22c
+ #define MMC_XGMAC_RX_PKT_ASSEMBLY_OK 0x230
+@@ -352,6 +354,8 @@ static void dwxgmac_mmc_intr_all_mask(void __iomem *mmcaddr)
+ {
+ writel(0x0, mmcaddr + MMC_RX_INTR_MASK);
+ writel(0x0, mmcaddr + MMC_TX_INTR_MASK);
++ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_TX_FPE_INTR_MASK);
++ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_FPE_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_IPC_INTR_MASK);
+ }
+
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 25ddfabc58f73..5b156c5bc04a5 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -2058,6 +2058,13 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
+ if (ret)
+ return ret;
+
++ if (id->ncap == 0) {
++ /* namespace not allocated or attached */
++ info->is_removed = true;
++ ret = -ENODEV;
++ goto error;
++ }
++
+ blk_mq_freeze_queue(ns->disk->queue);
+ lbaf = nvme_lbaf_index(id->flbas);
+ ns->lba_shift = id->lbaf[lbaf].ds;
+@@ -2107,6 +2114,8 @@ out:
+ set_bit(NVME_NS_READY, &ns->flags);
+ ret = 0;
+ }
++
++error:
+ kfree(id);
+ return ret;
+ }
+diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
+index d4c566c1c8725..1c7fd05ce0280 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
++++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
+@@ -120,6 +120,7 @@
+
+ /* ELBI registers */
+ #define ELBI_SYS_STTS 0x08
++#define ELBI_CS2_ENABLE 0xa4
+
+ /* DBI registers */
+ #define DBI_CON_STATUS 0x44
+@@ -252,6 +253,21 @@ static void qcom_pcie_dw_stop_link(struct dw_pcie *pci)
+ disable_irq(pcie_ep->perst_irq);
+ }
+
++static void qcom_pcie_dw_write_dbi2(struct dw_pcie *pci, void __iomem *base,
++ u32 reg, size_t size, u32 val)
++{
++ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
++ int ret;
++
++ writel(1, pcie_ep->elbi + ELBI_CS2_ENABLE);
++
++ ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
++ if (ret)
++ dev_err(pci->dev, "Failed to write DBI2 register (0x%x): %d\n", reg, ret);
++
++ writel(0, pcie_ep->elbi + ELBI_CS2_ENABLE);
++}
++
+ static int qcom_pcie_enable_resources(struct qcom_pcie_ep *pcie_ep)
+ {
+ int ret;
+@@ -446,6 +462,7 @@ static const struct dw_pcie_ops pci_ops = {
+ .link_up = qcom_pcie_dw_link_up,
+ .start_link = qcom_pcie_dw_start_link,
+ .stop_link = qcom_pcie_dw_stop_link,
++ .write_dbi2 = qcom_pcie_dw_write_dbi2,
+ };
+
+ static int qcom_pcie_ep_get_io_resources(struct platform_device *pdev,
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 48389785d9247..c132839d99dc8 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -6058,3 +6058,15 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2d, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2f, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size);
+ #endif
++
++/*
++ * Devices known to require a longer delay before first config space access
++ * after reset recovery or resume from D3cold:
++ *
++ * VideoPropulsion (aka Genroco) Torrent QN16e MPEG QAM Modulator
++ */
++static void pci_fixup_d3cold_delay_1sec(struct pci_dev *pdev)
++{
++ pdev->d3cold_delay = 1000;
++}
++DECLARE_PCI_FIXUP_FINAL(0x5555, 0x0004, pci_fixup_d3cold_delay_1sec);
+diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
+index 9e57f4c62e609..f1962866bb814 100644
+--- a/drivers/pinctrl/core.c
++++ b/drivers/pinctrl/core.c
+@@ -1239,17 +1239,17 @@ static void pinctrl_link_add(struct pinctrl_dev *pctldev,
+ static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
+ {
+ struct pinctrl_setting *setting, *setting2;
+- struct pinctrl_state *old_state = p->state;
++ struct pinctrl_state *old_state = READ_ONCE(p->state);
+ int ret;
+
+- if (p->state) {
++ if (old_state) {
+ /*
+ * For each pinmux setting in the old state, forget SW's record
+ * of mux owner for that pingroup. Any pingroups which are
+ * still owned by the new state will be re-acquired by the call
+ * to pinmux_enable_setting() in the loop below.
+ */
+- list_for_each_entry(setting, &p->state->settings, node) {
++ list_for_each_entry(setting, &old_state->settings, node) {
+ if (setting->type != PIN_MAP_TYPE_MUX_GROUP)
+ continue;
+ pinmux_disable_setting(setting);
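The pinctrl_commit_state() hunk replaces repeated loads of p->state with one READ_ONCE() snapshot, so the NULL test and the later settings walk are guaranteed to act on the same pointer even if p->state changes concurrently. The shape of the idiom (walk_settings is a hypothetical helper):

	struct pinctrl_state *old_state = READ_ONCE(p->state);

	if (old_state)				/* test the snapshot ... */
		walk_settings(old_state);	/* ... and use that same snapshot */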
+diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c
+index 2ff7717530bf8..8a2f18fa3faf5 100644
+--- a/drivers/powercap/dtpm_cpu.c
++++ b/drivers/powercap/dtpm_cpu.c
+@@ -24,7 +24,6 @@
+ #include <linux/of.h>
+ #include <linux/pm_qos.h>
+ #include <linux/slab.h>
+-#include <linux/units.h>
+
+ struct dtpm_cpu {
+ struct dtpm dtpm;
+@@ -104,8 +103,7 @@ static u64 get_pd_power_uw(struct dtpm *dtpm)
+ if (pd->table[i].frequency < freq)
+ continue;
+
+- return scale_pd_power_uw(pd_mask, pd->table[i].power *
+- MICROWATT_PER_MILLIWATT);
++ return scale_pd_power_uw(pd_mask, pd->table[i].power);
+ }
+
+ return 0;
+@@ -122,11 +120,9 @@ static int update_pd_power_uw(struct dtpm *dtpm)
+ nr_cpus = cpumask_weight(&cpus);
+
+ dtpm->power_min = em->table[0].power;
+- dtpm->power_min *= MICROWATT_PER_MILLIWATT;
+ dtpm->power_min *= nr_cpus;
+
+ dtpm->power_max = em->table[em->nr_perf_states - 1].power;
+- dtpm->power_max *= MICROWATT_PER_MILLIWATT;
+ dtpm->power_max *= nr_cpus;
+
+ return 0;
+diff --git a/drivers/powercap/dtpm_devfreq.c b/drivers/powercap/dtpm_devfreq.c
+index 91276761a31d9..612c3b59dd5be 100644
+--- a/drivers/powercap/dtpm_devfreq.c
++++ b/drivers/powercap/dtpm_devfreq.c
+@@ -39,10 +39,8 @@ static int update_pd_power_uw(struct dtpm *dtpm)
+ struct em_perf_domain *pd = em_pd_get(dev);
+
+ dtpm->power_min = pd->table[0].power;
+- dtpm->power_min *= MICROWATT_PER_MILLIWATT;
+
+ dtpm->power_max = pd->table[pd->nr_perf_states - 1].power;
+- dtpm->power_max *= MICROWATT_PER_MILLIWATT;
+
+ return 0;
+ }
+@@ -54,13 +52,10 @@ static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
+ struct device *dev = devfreq->dev.parent;
+ struct em_perf_domain *pd = em_pd_get(dev);
+ unsigned long freq;
+- u64 power;
+ int i;
+
+ for (i = 0; i < pd->nr_perf_states; i++) {
+-
+- power = pd->table[i].power * MICROWATT_PER_MILLIWATT;
+- if (power > power_limit)
++ if (pd->table[i].power > power_limit)
+ break;
+ }
+
+@@ -68,7 +63,7 @@ static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
+
+ dev_pm_qos_update_request(&dtpm_devfreq->qos_req, freq);
+
+- power_limit = pd->table[i - 1].power * MICROWATT_PER_MILLIWATT;
++ power_limit = pd->table[i - 1].power;
+
+ return power_limit;
+ }
+@@ -110,7 +105,7 @@ static u64 get_pd_power_uw(struct dtpm *dtpm)
+ if (pd->table[i].frequency < freq)
+ continue;
+
+- power = pd->table[i].power * MICROWATT_PER_MILLIWATT;
++ power = pd->table[i].power;
+ power *= status.busy_time;
+ power >>= 10;
+
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index deed8c909a786..31b5273f43a71 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -3834,8 +3834,15 @@ static int sd_resume(struct device *dev, bool runtime)
+
+ static int sd_resume_system(struct device *dev)
+ {
+- if (pm_runtime_suspended(dev))
++ if (pm_runtime_suspended(dev)) {
++ struct scsi_disk *sdkp = dev_get_drvdata(dev);
++ struct scsi_device *sdp = sdkp ? sdkp->device : NULL;
++
++ if (sdp && sdp->force_runtime_start_on_system_start)
++ pm_request_resume(dev);
++
+ return 0;
++ }
+
+ return sd_resume(dev, false);
+ }
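The sd_resume_system() change keys off the new force_runtime_start_on_system_start flag: a runtime-suspended disk is normally left suspended across system resume, but flagged devices get an asynchronous runtime resume queued instead. Reduced to its core (device_wants_forced_start is a hypothetical stand-in for the flag test):

	if (pm_runtime_suspended(dev)) {
		if (device_wants_forced_start(dev))
			pm_request_resume(dev);	/* queue an async runtime resume */
		return 0;
	}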
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 151fef199c380..5d046be8b2dd5 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -3299,33 +3299,52 @@ void spi_unregister_controller(struct spi_controller *ctlr)
+ }
+ EXPORT_SYMBOL_GPL(spi_unregister_controller);
+
++static inline int __spi_check_suspended(const struct spi_controller *ctlr)
++{
++ return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0;
++}
++
++static inline void __spi_mark_suspended(struct spi_controller *ctlr)
++{
++ mutex_lock(&ctlr->bus_lock_mutex);
++ ctlr->flags |= SPI_CONTROLLER_SUSPENDED;
++ mutex_unlock(&ctlr->bus_lock_mutex);
++}
++
++static inline void __spi_mark_resumed(struct spi_controller *ctlr)
++{
++ mutex_lock(&ctlr->bus_lock_mutex);
++ ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED;
++ mutex_unlock(&ctlr->bus_lock_mutex);
++}
++
+ int spi_controller_suspend(struct spi_controller *ctlr)
+ {
+- int ret;
++ int ret = 0;
+
+ /* Basically no-ops for non-queued controllers */
+- if (!ctlr->queued)
+- return 0;
+-
+- ret = spi_stop_queue(ctlr);
+- if (ret)
+- dev_err(&ctlr->dev, "queue stop failed\n");
++ if (ctlr->queued) {
++ ret = spi_stop_queue(ctlr);
++ if (ret)
++ dev_err(&ctlr->dev, "queue stop failed\n");
++ }
+
++ __spi_mark_suspended(ctlr);
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(spi_controller_suspend);
+
+ int spi_controller_resume(struct spi_controller *ctlr)
+ {
+- int ret;
+-
+- if (!ctlr->queued)
+- return 0;
++ int ret = 0;
+
+- ret = spi_start_queue(ctlr);
+- if (ret)
+- dev_err(&ctlr->dev, "queue restart failed\n");
++ __spi_mark_resumed(ctlr);
+
++ if (ctlr->queued) {
++ ret = spi_start_queue(ctlr);
++ if (ret)
++ dev_err(&ctlr->dev, "queue restart failed\n");
++ }
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(spi_controller_resume);
+@@ -4050,8 +4069,7 @@ static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct s
+ ctlr->cur_msg = msg;
+ ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
+ if (ret)
+- goto out;
+-
++ dev_err(&ctlr->dev, "noqueue transfer failed\n");
+ ctlr->cur_msg = NULL;
+ ctlr->fallback = false;
+
+@@ -4067,7 +4085,6 @@ static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct s
+ spi_idle_runtime_pm(ctlr);
+ }
+
+-out:
+ mutex_unlock(&ctlr->io_mutex);
+ }
+
+@@ -4090,6 +4107,11 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
+ int status;
+ struct spi_controller *ctlr = spi->controller;
+
++ if (__spi_check_suspended(ctlr)) {
++ dev_warn_once(&spi->dev, "Attempted to sync while suspend\n");
++ return -ESHUTDOWN;
++ }
++
+ status = __spi_validate(spi, message);
+ if (status != 0)
+ return status;
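With SPI_CONTROLLER_SUSPENDED tracked, a synchronous transfer against a suspended controller now fails fast with -ESHUTDOWN instead of poking stopped hardware. A hypothetical client sees it as (spi and msg assumed set up earlier):

	ret = spi_sync(spi, &msg);
	if (ret == -ESHUTDOWN)
		dev_dbg(&spi->dev, "controller suspended, dropping transfer\n");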
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index 21145eb8f2a9c..b398fba942964 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -486,6 +486,7 @@ static bool sc16is7xx_regmap_volatile(struct device *dev, unsigned int reg)
+ case SC16IS7XX_TXLVL_REG:
+ case SC16IS7XX_RXLVL_REG:
+ case SC16IS7XX_IOSTATE_REG:
++ case SC16IS7XX_IOCONTROL_REG:
+ return true;
+ default:
+ break;
+@@ -1555,6 +1556,10 @@ static int sc16is7xx_probe(struct device *dev,
+ goto out_ports;
+ }
+
++ ret = uart_get_rs485_mode(&s->p[i].port);
++ if (ret)
++ goto out_ports;
++
+ /* Disable all interrupts */
+ sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_IER_REG, 0);
+ /* Disable TX/RX */
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index 48bc8a4814ac4..d396ac8b9cedd 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -61,7 +61,7 @@ static void usb_parse_ssp_isoc_endpoint_companion(struct device *ddev,
+ desc = (struct usb_ssp_isoc_ep_comp_descriptor *) buffer;
+ if (desc->bDescriptorType != USB_DT_SSP_ISOC_ENDPOINT_COMP ||
+ size < USB_DT_SSP_ISOC_EP_COMP_SIZE) {
+- dev_warn(ddev, "Invalid SuperSpeedPlus isoc endpoint companion"
++ dev_notice(ddev, "Invalid SuperSpeedPlus isoc endpoint companion "
+ "for config %d interface %d altsetting %d ep %d.\n",
+ cfgno, inum, asnum, ep->desc.bEndpointAddress);
+ return;
+@@ -83,7 +83,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
+
+ if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP ||
+ size < USB_DT_SS_EP_COMP_SIZE) {
+- dev_warn(ddev, "No SuperSpeed endpoint companion for config %d "
++ dev_notice(ddev, "No SuperSpeed endpoint companion for config %d "
+ " interface %d altsetting %d ep %d: "
+ "using minimum values\n",
+ cfgno, inum, asnum, ep->desc.bEndpointAddress);
+@@ -109,13 +109,13 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
+
+ /* Check the various values */
+ if (usb_endpoint_xfer_control(&ep->desc) && desc->bMaxBurst != 0) {
+- dev_warn(ddev, "Control endpoint with bMaxBurst = %d in "
++ dev_notice(ddev, "Control endpoint with bMaxBurst = %d in "
+ "config %d interface %d altsetting %d ep %d: "
+ "setting to zero\n", desc->bMaxBurst,
+ cfgno, inum, asnum, ep->desc.bEndpointAddress);
+ ep->ss_ep_comp.bMaxBurst = 0;
+ } else if (desc->bMaxBurst > 15) {
+- dev_warn(ddev, "Endpoint with bMaxBurst = %d in "
++ dev_notice(ddev, "Endpoint with bMaxBurst = %d in "
+ "config %d interface %d altsetting %d ep %d: "
+ "setting to 15\n", desc->bMaxBurst,
+ cfgno, inum, asnum, ep->desc.bEndpointAddress);
+@@ -125,7 +125,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
+ if ((usb_endpoint_xfer_control(&ep->desc) ||
+ usb_endpoint_xfer_int(&ep->desc)) &&
+ desc->bmAttributes != 0) {
+- dev_warn(ddev, "%s endpoint with bmAttributes = %d in "
++ dev_notice(ddev, "%s endpoint with bmAttributes = %d in "
+ "config %d interface %d altsetting %d ep %d: "
+ "setting to zero\n",
+ usb_endpoint_xfer_control(&ep->desc) ? "Control" : "Bulk",
+@@ -134,7 +134,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
+ ep->ss_ep_comp.bmAttributes = 0;
+ } else if (usb_endpoint_xfer_bulk(&ep->desc) &&
+ desc->bmAttributes > 16) {
+- dev_warn(ddev, "Bulk endpoint with more than 65536 streams in "
++ dev_notice(ddev, "Bulk endpoint with more than 65536 streams in "
+ "config %d interface %d altsetting %d ep %d: "
+ "setting to max\n",
+ cfgno, inum, asnum, ep->desc.bEndpointAddress);
+@@ -142,7 +142,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
+ } else if (usb_endpoint_xfer_isoc(&ep->desc) &&
+ !USB_SS_SSP_ISOC_COMP(desc->bmAttributes) &&
+ USB_SS_MULT(desc->bmAttributes) > 3) {
+- dev_warn(ddev, "Isoc endpoint has Mult of %d in "
++ dev_notice(ddev, "Isoc endpoint has Mult of %d in "
+ "config %d interface %d altsetting %d ep %d: "
+ "setting to 3\n",
+ USB_SS_MULT(desc->bmAttributes),
+@@ -160,7 +160,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
+ else
+ max_tx = 999999;
+ if (le16_to_cpu(desc->wBytesPerInterval) > max_tx) {
+- dev_warn(ddev, "%s endpoint with wBytesPerInterval of %d in "
++ dev_notice(ddev, "%s endpoint with wBytesPerInterval of %d in "
+ "config %d interface %d altsetting %d ep %d: "
+ "setting to %d\n",
+ usb_endpoint_xfer_isoc(&ep->desc) ? "Isoc" : "Int",
+@@ -273,7 +273,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ else if (d->bLength >= USB_DT_ENDPOINT_SIZE)
+ n = USB_DT_ENDPOINT_SIZE;
+ else {
+- dev_warn(ddev, "config %d interface %d altsetting %d has an "
++ dev_notice(ddev, "config %d interface %d altsetting %d has an "
+ "invalid endpoint descriptor of length %d, skipping\n",
+ cfgno, inum, asnum, d->bLength);
+ goto skip_to_next_endpoint_or_interface_descriptor;
+@@ -281,7 +281,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+
+ i = d->bEndpointAddress & ~USB_ENDPOINT_DIR_MASK;
+ if (i >= 16 || i == 0) {
+- dev_warn(ddev, "config %d interface %d altsetting %d has an "
++ dev_notice(ddev, "config %d interface %d altsetting %d has an "
+ "invalid endpoint with address 0x%X, skipping\n",
+ cfgno, inum, asnum, d->bEndpointAddress);
+ goto skip_to_next_endpoint_or_interface_descriptor;
+@@ -293,7 +293,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+
+ /* Check for duplicate endpoint addresses */
+ if (config_endpoint_is_duplicate(config, inum, asnum, d)) {
+- dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
++ dev_notice(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
+ cfgno, inum, asnum, d->bEndpointAddress);
+ goto skip_to_next_endpoint_or_interface_descriptor;
+ }
+@@ -301,7 +301,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ /* Ignore some endpoints */
+ if (udev->quirks & USB_QUIRK_ENDPOINT_IGNORE) {
+ if (usb_endpoint_is_ignored(udev, ifp, d)) {
+- dev_warn(ddev, "config %d interface %d altsetting %d has an ignored endpoint with address 0x%X, skipping\n",
++ dev_notice(ddev, "config %d interface %d altsetting %d has an ignored endpoint with address 0x%X, skipping\n",
+ cfgno, inum, asnum,
+ d->bEndpointAddress);
+ goto skip_to_next_endpoint_or_interface_descriptor;
+@@ -378,7 +378,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ }
+ }
+ if (d->bInterval < i || d->bInterval > j) {
+- dev_warn(ddev, "config %d interface %d altsetting %d "
++ dev_notice(ddev, "config %d interface %d altsetting %d "
+ "endpoint 0x%X has an invalid bInterval %d, "
+ "changing to %d\n",
+ cfgno, inum, asnum,
+@@ -391,7 +391,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ * them usable, we will try treating them as Interrupt endpoints.
+ */
+ if (udev->speed == USB_SPEED_LOW && usb_endpoint_xfer_bulk(d)) {
+- dev_warn(ddev, "config %d interface %d altsetting %d "
++ dev_notice(ddev, "config %d interface %d altsetting %d "
+ "endpoint 0x%X is Bulk; changing to Interrupt\n",
+ cfgno, inum, asnum, d->bEndpointAddress);
+ endpoint->desc.bmAttributes = USB_ENDPOINT_XFER_INT;
+@@ -408,7 +408,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ */
+ maxp = le16_to_cpu(endpoint->desc.wMaxPacketSize);
+ if (maxp == 0 && !(usb_endpoint_xfer_isoc(d) && asnum == 0)) {
+- dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n",
++ dev_notice(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n",
+ cfgno, inum, asnum, d->bEndpointAddress);
+ }
+
+@@ -439,7 +439,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ j = maxpacket_maxes[usb_endpoint_type(&endpoint->desc)];
+
+ if (maxp > j) {
+- dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n",
++ dev_notice(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n",
+ cfgno, inum, asnum, d->bEndpointAddress, maxp, j);
+ maxp = j;
+ endpoint->desc.wMaxPacketSize = cpu_to_le16(i | maxp);
+@@ -452,7 +452,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ */
+ if (udev->speed == USB_SPEED_HIGH && usb_endpoint_xfer_bulk(d)) {
+ if (maxp != 512)
+- dev_warn(ddev, "config %d interface %d altsetting %d "
++ dev_notice(ddev, "config %d interface %d altsetting %d "
+ "bulk endpoint 0x%X has invalid maxpacket %d\n",
+ cfgno, inum, asnum, d->bEndpointAddress,
+ maxp);
+@@ -533,7 +533,7 @@ static int usb_parse_interface(struct device *ddev, int cfgno,
+ i < intfc->num_altsetting;
+ (++i, ++alt)) {
+ if (alt->desc.bAlternateSetting == asnum) {
+- dev_warn(ddev, "Duplicate descriptor for config %d "
++ dev_notice(ddev, "Duplicate descriptor for config %d "
+ "interface %d altsetting %d, skipping\n",
+ cfgno, inum, asnum);
+ goto skip_to_next_interface_descriptor;
+@@ -559,7 +559,7 @@ static int usb_parse_interface(struct device *ddev, int cfgno,
+ num_ep = num_ep_orig = alt->desc.bNumEndpoints;
+ alt->desc.bNumEndpoints = 0; /* Use as a counter */
+ if (num_ep > USB_MAXENDPOINTS) {
+- dev_warn(ddev, "too many endpoints for config %d interface %d "
++ dev_notice(ddev, "too many endpoints for config %d interface %d "
+ "altsetting %d: %d, using maximum allowed: %d\n",
+ cfgno, inum, asnum, num_ep, USB_MAXENDPOINTS);
+ num_ep = USB_MAXENDPOINTS;
+@@ -590,7 +590,7 @@ static int usb_parse_interface(struct device *ddev, int cfgno,
+ }
+
+ if (n != num_ep_orig)
+- dev_warn(ddev, "config %d interface %d altsetting %d has %d "
++ dev_notice(ddev, "config %d interface %d altsetting %d has %d "
+ "endpoint descriptor%s, different from the interface "
+ "descriptor's value: %d\n",
+ cfgno, inum, asnum, n, plural(n), num_ep_orig);
+@@ -625,7 +625,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
+ if (config->desc.bDescriptorType != USB_DT_CONFIG ||
+ config->desc.bLength < USB_DT_CONFIG_SIZE ||
+ config->desc.bLength > size) {
+- dev_err(ddev, "invalid descriptor for config index %d: "
++ dev_notice(ddev, "invalid descriptor for config index %d: "
+ "type = 0x%X, length = %d\n", cfgidx,
+ config->desc.bDescriptorType, config->desc.bLength);
+ return -EINVAL;
+@@ -636,7 +636,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
+ size -= config->desc.bLength;
+
+ if (nintf > USB_MAXINTERFACES) {
+- dev_warn(ddev, "config %d has too many interfaces: %d, "
++ dev_notice(ddev, "config %d has too many interfaces: %d, "
+ "using maximum allowed: %d\n",
+ cfgno, nintf, USB_MAXINTERFACES);
+ nintf = USB_MAXINTERFACES;
+@@ -650,7 +650,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
+ (buffer2 += header->bLength, size2 -= header->bLength)) {
+
+ if (size2 < sizeof(struct usb_descriptor_header)) {
+- dev_warn(ddev, "config %d descriptor has %d excess "
++ dev_notice(ddev, "config %d descriptor has %d excess "
+ "byte%s, ignoring\n",
+ cfgno, size2, plural(size2));
+ break;
+@@ -658,7 +658,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
+
+ header = (struct usb_descriptor_header *) buffer2;
+ if ((header->bLength > size2) || (header->bLength < 2)) {
+- dev_warn(ddev, "config %d has an invalid descriptor "
++ dev_notice(ddev, "config %d has an invalid descriptor "
+ "of length %d, skipping remainder of the config\n",
+ cfgno, header->bLength);
+ break;
+@@ -670,7 +670,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
+
+ d = (struct usb_interface_descriptor *) header;
+ if (d->bLength < USB_DT_INTERFACE_SIZE) {
+- dev_warn(ddev, "config %d has an invalid "
++ dev_notice(ddev, "config %d has an invalid "
+ "interface descriptor of length %d, "
+ "skipping\n", cfgno, d->bLength);
+ continue;
+@@ -680,7 +680,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
+
+ if ((dev->quirks & USB_QUIRK_HONOR_BNUMINTERFACES) &&
+ n >= nintf_orig) {
+- dev_warn(ddev, "config %d has more interface "
++ dev_notice(ddev, "config %d has more interface "
+ "descriptors, than it declares in "
+ "bNumInterfaces, ignoring interface "
+ "number: %d\n", cfgno, inum);
+@@ -688,7 +688,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
+ }
+
+ if (inum >= nintf_orig)
+- dev_warn(ddev, "config %d has an invalid "
++ dev_notice(ddev, "config %d has an invalid "
+ "interface number: %d but max is %d\n",
+ cfgno, inum, nintf_orig - 1);
+
+@@ -713,14 +713,14 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
+
+ d = (struct usb_interface_assoc_descriptor *)header;
+ if (d->bLength < USB_DT_INTERFACE_ASSOCIATION_SIZE) {
+- dev_warn(ddev,
++ dev_notice(ddev,
+ "config %d has an invalid interface association descriptor of length %d, skipping\n",
+ cfgno, d->bLength);
+ continue;
+ }
+
+ if (iad_num == USB_MAXIADS) {
+- dev_warn(ddev, "found more Interface "
++ dev_notice(ddev, "found more Interface "
+ "Association Descriptors "
+ "than allocated for in "
+ "configuration %d\n", cfgno);
+@@ -731,7 +731,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
+
+ } else if (header->bDescriptorType == USB_DT_DEVICE ||
+ header->bDescriptorType == USB_DT_CONFIG)
+- dev_warn(ddev, "config %d contains an unexpected "
++ dev_notice(ddev, "config %d contains an unexpected "
+ "descriptor of type 0x%X, skipping\n",
+ cfgno, header->bDescriptorType);
+
+@@ -740,11 +740,11 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
+ config->desc.wTotalLength = cpu_to_le16(buffer2 - buffer0);
+
+ if (n != nintf)
+- dev_warn(ddev, "config %d has %d interface%s, different from "
++ dev_notice(ddev, "config %d has %d interface%s, different from "
+ "the descriptor's value: %d\n",
+ cfgno, n, plural(n), nintf_orig);
+ else if (n == 0)
+- dev_warn(ddev, "config %d has no interfaces?\n", cfgno);
++ dev_notice(ddev, "config %d has no interfaces?\n", cfgno);
+ config->desc.bNumInterfaces = nintf = n;
+
+ /* Check for missing interface numbers */
+@@ -754,7 +754,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
+ break;
+ }
+ if (j >= nintf)
+- dev_warn(ddev, "config %d has no interface number "
++ dev_notice(ddev, "config %d has no interface number "
+ "%d\n", cfgno, i);
+ }
+
+@@ -762,7 +762,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
+ for (i = 0; i < nintf; ++i) {
+ j = nalts[i];
+ if (j > USB_MAXALTSETTING) {
+- dev_warn(ddev, "too many alternate settings for "
++ dev_notice(ddev, "too many alternate settings for "
+ "config %d interface %d: %d, "
+ "using maximum allowed: %d\n",
+ cfgno, inums[i], j, USB_MAXALTSETTING);
+@@ -811,7 +811,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
+ break;
+ }
+ if (n >= intfc->num_altsetting)
+- dev_warn(ddev, "config %d interface %d has no "
++ dev_notice(ddev, "config %d interface %d has no "
+ "altsetting %d\n", cfgno, inums[i], j);
+ }
+ }
+@@ -868,7 +868,7 @@ int usb_get_configuration(struct usb_device *dev)
+ int result;
+
+ if (ncfg > USB_MAXCONFIG) {
+- dev_warn(ddev, "too many configurations: %d, "
++ dev_notice(ddev, "too many configurations: %d, "
+ "using maximum allowed: %d\n", ncfg, USB_MAXCONFIG);
+ dev->descriptor.bNumConfigurations = ncfg = USB_MAXCONFIG;
+ }
+@@ -902,7 +902,7 @@ int usb_get_configuration(struct usb_device *dev)
+ "descriptor/%s: %d\n", cfgno, "start", result);
+ if (result != -EPIPE)
+ goto err;
+- dev_err(ddev, "chopping to %d config(s)\n", cfgno);
++ dev_notice(ddev, "chopping to %d config(s)\n", cfgno);
+ dev->descriptor.bNumConfigurations = cfgno;
+ break;
+ } else if (result < 4) {
+@@ -934,7 +934,7 @@ int usb_get_configuration(struct usb_device *dev)
+ goto err;
+ }
+ if (result < length) {
+- dev_warn(ddev, "config index %d descriptor too short "
++ dev_notice(ddev, "config index %d descriptor too short "
+ "(expected %i, got %i)\n", cfgno, length, result);
+ length = result;
+ }
+@@ -993,7 +993,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
+ /* Get BOS descriptor */
+ ret = usb_get_descriptor(dev, USB_DT_BOS, 0, bos, USB_DT_BOS_SIZE);
+ if (ret < USB_DT_BOS_SIZE || bos->bLength < USB_DT_BOS_SIZE) {
+- dev_err(ddev, "unable to get BOS descriptor or descriptor too short\n");
++ dev_notice(ddev, "unable to get BOS descriptor or descriptor too short\n");
+ if (ret >= 0)
+ ret = -ENOMSG;
+ kfree(bos);
+@@ -1021,7 +1021,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
+
+ ret = usb_get_descriptor(dev, USB_DT_BOS, 0, buffer, total_len);
+ if (ret < total_len) {
+- dev_err(ddev, "unable to get BOS descriptor set\n");
++ dev_notice(ddev, "unable to get BOS descriptor set\n");
+ if (ret >= 0)
+ ret = -ENOMSG;
+ goto err;
+@@ -1046,8 +1046,8 @@ int usb_get_bos_descriptor(struct usb_device *dev)
+ }
+
+ if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
+- dev_warn(ddev, "descriptor type invalid, skip\n");
+- continue;
++ dev_notice(ddev, "descriptor type invalid, skip\n");
++ goto skip_to_next_descriptor;
+ }
+
+ switch (cap_type) {
+@@ -1081,6 +1081,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
+ break;
+ }
+
++skip_to_next_descriptor:
+ total_len -= length;
+ buffer += length;
+ }
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index c9a101f0e8d01..c9438dc56f5fc 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -184,7 +184,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
+ int ret;
+ int irq;
+ struct xhci_plat_priv *priv = NULL;
+-
++ bool of_match;
+
+ if (usb_disabled())
+ return -ENODEV;
+@@ -305,16 +305,23 @@ static int xhci_plat_probe(struct platform_device *pdev)
+ &xhci->imod_interval);
+ }
+
+- hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
+- if (IS_ERR(hcd->usb_phy)) {
+- ret = PTR_ERR(hcd->usb_phy);
+- if (ret == -EPROBE_DEFER)
+- goto disable_clk;
+- hcd->usb_phy = NULL;
+- } else {
+- ret = usb_phy_init(hcd->usb_phy);
+- if (ret)
+- goto disable_clk;
++ /*
++ * Drivers such as dwc3 manage PHYs themselves (and rely on driver name
++ * matching for the xhci platform device).
++ */
++ of_match = of_match_device(pdev->dev.driver->of_match_table, &pdev->dev);
++ if (of_match) {
++ hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
++ if (IS_ERR(hcd->usb_phy)) {
++ ret = PTR_ERR(hcd->usb_phy);
++ if (ret == -EPROBE_DEFER)
++ goto disable_clk;
++ hcd->usb_phy = NULL;
++ } else {
++ ret = usb_phy_init(hcd->usb_phy);
++ if (ret)
++ goto disable_clk;
++ }
+ }
+
+ hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
+diff --git a/drivers/video/fbdev/sticore.h b/drivers/video/fbdev/sticore.h
+index 0ebdd28a0b813..d83ab3ded5f3d 100644
+--- a/drivers/video/fbdev/sticore.h
++++ b/drivers/video/fbdev/sticore.h
+@@ -231,7 +231,7 @@ struct sti_rom_font {
+ u8 height;
+ u8 font_type; /* language type */
+ u8 bytes_per_char;
+- u32 next_font;
++ s32 next_font; /* note: signed int */
+ u8 underline_height;
+ u8 underline_pos;
+ u8 res008[2];
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
+index af9115d648092..00f8e349921d4 100644
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -1710,9 +1710,10 @@ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
+ generic_handle_irq(irq);
+ }
+
+-static void __xen_evtchn_do_upcall(void)
++int xen_evtchn_do_upcall(void)
+ {
+ struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
++ int ret = vcpu_info->evtchn_upcall_pending ? IRQ_HANDLED : IRQ_NONE;
+ int cpu = smp_processor_id();
+ struct evtchn_loop_ctrl ctrl = { 0 };
+
+@@ -1744,25 +1745,10 @@ static void __xen_evtchn_do_upcall(void)
+ * above.
+ */
+ __this_cpu_inc(irq_epoch);
+-}
+-
+-void xen_evtchn_do_upcall(struct pt_regs *regs)
+-{
+- struct pt_regs *old_regs = set_irq_regs(regs);
+-
+- irq_enter();
+-
+- __xen_evtchn_do_upcall();
+
+- irq_exit();
+- set_irq_regs(old_regs);
+-}
+-
+-void xen_hvm_evtchn_do_upcall(void)
+-{
+- __xen_evtchn_do_upcall();
++ return ret;
+ }
+-EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);
++EXPORT_SYMBOL_GPL(xen_evtchn_do_upcall);
+
+ /* Rebind a new event channel to an existing irq. */
+ void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)
+diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
+index cd07e3fed0faf..544d3f9010b92 100644
+--- a/drivers/xen/platform-pci.c
++++ b/drivers/xen/platform-pci.c
+@@ -64,14 +64,13 @@ static uint64_t get_callback_via(struct pci_dev *pdev)
+
+ static irqreturn_t do_hvm_evtchn_intr(int irq, void *dev_id)
+ {
+- xen_hvm_evtchn_do_upcall();
+- return IRQ_HANDLED;
++ return xen_evtchn_do_upcall();
+ }
+
+ static int xen_allocate_irq(struct pci_dev *pdev)
+ {
+ return request_irq(pdev->irq, do_hvm_evtchn_intr,
+- IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
++ IRQF_NOBALANCING | IRQF_SHARED,
+ "xen-platform-pci", pdev);
+ }
+
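Requesting the Xen platform IRQ with IRQF_SHARED is only safe because xen_evtchn_do_upcall() now reports whether an upcall was actually pending: on a shared line every handler must return IRQ_NONE when its device is idle so the other sharers get polled. The general contract, sketched with a hypothetical device_raised_irq() check:

static irqreturn_t my_shared_handler(int irq, void *dev_id)
{
	if (!device_raised_irq(dev_id))
		return IRQ_NONE;	/* not ours: let other sharers run */

	/* ... service the device ... */
	return IRQ_HANDLED;
}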
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index d1dae29a3d012..40152458e7b74 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -3493,6 +3493,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
+ goto fail_alloc;
+ }
+
++ btrfs_info(fs_info, "first mount of filesystem %pU", disk_super->fsid);
+ /*
+ * Verify the type first, if that or the checksum value are
+ * corrupted, we'll find out
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index e015e1e025b6e..dc6e3cce747c1 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -5236,6 +5236,7 @@ static int _btrfs_ioctl_send(struct inode *inode, void __user *argp, bool compat
+ arg->clone_sources = compat_ptr(args32.clone_sources);
+ arg->parent_root = args32.parent_root;
+ arg->flags = args32.flags;
++ arg->version = args32.version;
+ memcpy(arg->reserved, args32.reserved,
+ sizeof(args32.reserved));
+ #else
+diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
+index a248f46cfe728..4b052d4009d31 100644
+--- a/fs/btrfs/ref-verify.c
++++ b/fs/btrfs/ref-verify.c
+@@ -788,6 +788,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
+ dump_ref_action(fs_info, ra);
+ kfree(ref);
+ kfree(ra);
++ kfree(re);
+ goto out_unlock;
+ } else if (be->num_refs == 0) {
+ btrfs_err(fs_info,
+@@ -797,6 +798,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
+ dump_ref_action(fs_info, ra);
+ kfree(ref);
+ kfree(ra);
++ kfree(re);
+ goto out_unlock;
+ }
+
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index 547b5c2292186..4a4d65b5e24f7 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -7885,7 +7885,7 @@ long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg)
+ }
+
+ sctx->send_filp = fget(arg->send_fd);
+- if (!sctx->send_filp) {
++ if (!sctx->send_filp || !(sctx->send_filp->f_mode & FMODE_WRITE)) {
+ ret = -EBADF;
+ goto out;
+ }
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 2c562febd801e..6fc5fa18d1ee6 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -391,7 +391,10 @@ void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
+
+ static void btrfs_put_super(struct super_block *sb)
+ {
+- close_ctree(btrfs_sb(sb));
++ struct btrfs_fs_info *fs_info = btrfs_sb(sb);
++
++ btrfs_info(fs_info, "last unmount of filesystem %pU", fs_info->fs_devices->fsid);
++ close_ctree(fs_info);
+ }
+
+ enum {
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index e62b4c139a72d..6fc2d99270c18 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -3074,15 +3074,16 @@ struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
+ read_unlock(&em_tree->lock);
+
+ if (!em) {
+- btrfs_crit(fs_info, "unable to find logical %llu length %llu",
++ btrfs_crit(fs_info,
++ "unable to find chunk map for logical %llu length %llu",
+ logical, length);
+ return ERR_PTR(-EINVAL);
+ }
+
+- if (em->start > logical || em->start + em->len < logical) {
++ if (em->start > logical || em->start + em->len <= logical) {
+ btrfs_crit(fs_info,
+- "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
+- logical, length, em->start, em->start + em->len);
++ "found a bad chunk map, wanted %llu-%llu, found %llu-%llu",
++ logical, logical + length, em->start, em->start + em->len);
+ free_extent_map(em);
+ return ERR_PTR(-EINVAL);
+ }
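The btrfs_get_chunk_map() fix tightens an off-by-one: an extent map covers the half-open range [em->start, em->start + em->len), so logical == em->start + em->len lies outside it, which the old '<' comparison wrongly accepted. As an expression:

	/* logical lies inside the map iff */
	bool in_range = em->start <= logical && logical < em->start + em->len;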
+diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
+index 4eb559a16c9ed..105c4a1d20a20 100644
+--- a/fs/iomap/direct-io.c
++++ b/fs/iomap/direct-io.c
+@@ -94,7 +94,6 @@ ssize_t iomap_dio_complete(struct iomap_dio *dio)
+ if (offset + ret > dio->i_size &&
+ !(dio->flags & IOMAP_DIO_WRITE))
+ ret = dio->i_size - offset;
+- iocb->ki_pos += ret;
+ }
+
+ /*
+@@ -120,18 +119,19 @@ ssize_t iomap_dio_complete(struct iomap_dio *dio)
+ }
+
+ inode_dio_end(file_inode(iocb->ki_filp));
+- /*
+- * If this is a DSYNC write, make sure we push it to stable storage now
+- * that we've written data.
+- */
+- if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
+- ret = generic_write_sync(iocb, ret);
+-
+- if (ret > 0)
+- ret += dio->done_before;
++ if (ret > 0) {
++ iocb->ki_pos += ret;
+
++ /*
++ * If this is a DSYNC write, make sure we push it to stable
++ * storage now that we've written data.
++ */
++ if (dio->flags & IOMAP_DIO_NEED_SYNC)
++ ret = generic_write_sync(iocb, ret);
++ if (ret > 0)
++ ret += dio->done_before;
++ }
+ kfree(dio);
+-
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(iomap_dio_complete);
+diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
+index 05516309ec3ab..7be51f9d2fa18 100644
+--- a/fs/smb/client/inode.c
++++ b/fs/smb/client/inode.c
+@@ -772,6 +772,8 @@ static void cifs_open_info_to_fattr(struct cifs_fattr *fattr, struct cifs_open_i
+ }
+
+ if (S_ISLNK(fattr->cf_mode)) {
++ if (likely(data->symlink_target))
++ fattr->cf_eof = strnlen(data->symlink_target, PATH_MAX);
+ fattr->cf_symlink_target = data->symlink_target;
+ data->symlink_target = NULL;
+ }
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 4cc56e4695fbc..e628848a1df93 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -3424,6 +3424,7 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
+ struct inode *inode = file_inode(file);
+ struct cifsInodeInfo *cifsi = CIFS_I(inode);
+ struct cifsFileInfo *cfile = file->private_data;
++ unsigned long long new_size;
+ long rc;
+ unsigned int xid;
+ __le64 eof;
+@@ -3454,10 +3455,15 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
+ /*
+ * do we also need to change the size of the file?
+ */
+- if (keep_size == false && i_size_read(inode) < offset + len) {
+- eof = cpu_to_le64(offset + len);
++ new_size = offset + len;
++ if (keep_size == false && (unsigned long long)i_size_read(inode) < new_size) {
++ eof = cpu_to_le64(new_size);
+ rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
+ cfile->fid.volatile_fid, cfile->pid, &eof);
++ if (rc >= 0) {
++ truncate_setsize(inode, new_size);
++ fscache_resize_cookie(cifs_inode_cookie(inode), new_size);
++ }
+ }
+
+ zero_range_exit:
+@@ -3852,6 +3858,9 @@ static long smb3_insert_range(struct file *file, struct cifs_tcon *tcon,
+ if (rc < 0)
+ goto out_2;
+
++ truncate_setsize(inode, old_eof + len);
++ fscache_resize_cookie(cifs_inode_cookie(inode), i_size_read(inode));
++
+ rc = smb2_copychunk_range(xid, cfile, cfile, off, count, off + len);
+ if (rc < 0)
+ goto out_2;
+diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
+index 3dfb994312b1f..b79097b9070b3 100644
+--- a/include/linux/dma-fence.h
++++ b/include/linux/dma-fence.h
+@@ -478,6 +478,21 @@ static inline bool dma_fence_is_later(struct dma_fence *f1,
+ return __dma_fence_is_later(f1->seqno, f2->seqno, f1->ops);
+ }
+
++/**
++ * dma_fence_is_later_or_same - return true if f1 is later or same as f2
++ * @f1: the first fence from the same context
++ * @f2: the second fence from the same context
++ *
++ * Returns true if f1 is chronologically later than f2 or the same fence. Both
++ * fences must be from the same context, since a seqno is not re-used across
++ * contexts.
++ */
++static inline bool dma_fence_is_later_or_same(struct dma_fence *f1,
++ struct dma_fence *f2)
++{
++ return f1 == f2 || dma_fence_is_later(f1, f2);
++}
++
+ /**
+ * dma_fence_later - return the chronologically later fence
+ * @f1: the first fence from the same context
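A typical use of the new dma_fence_is_later_or_same() helper is deduplicating fences from a single context, where seeing the same fence again should count as already up to date. A sketch under that assumption (keep_newest is hypothetical; both fences must belong to the same context):

static void keep_newest(struct dma_fence **slot, struct dma_fence *fence)
{
	if (*slot && dma_fence_is_later_or_same(*slot, fence))
		return;			/* already hold this fence, or a newer one */

	dma_fence_put(*slot);		/* dma_fence_put(NULL) is a no-op */
	*slot = dma_fence_get(fence);
}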
+diff --git a/include/linux/irq.h b/include/linux/irq.h
+index c3eb89606c2b1..06c692cc0accb 100644
+--- a/include/linux/irq.h
++++ b/include/linux/irq.h
+@@ -215,8 +215,6 @@ struct irq_data {
+ * IRQD_SINGLE_TARGET - IRQ allows only a single affinity target
+ * IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set
+ * IRQD_CAN_RESERVE - Can use reservation mode
+- * IRQD_MSI_NOMASK_QUIRK - Non-maskable MSI quirk for affinity change
+- * required
+ * IRQD_HANDLE_ENFORCE_IRQCTX - Enforce that handle_irq_*() is only invoked
+ * from actual interrupt context.
+ * IRQD_AFFINITY_ON_ACTIVATE - Affinity is set on activation. Don't call
+@@ -245,10 +243,9 @@ enum {
+ IRQD_SINGLE_TARGET = (1 << 24),
+ IRQD_DEFAULT_TRIGGER_SET = (1 << 25),
+ IRQD_CAN_RESERVE = (1 << 26),
+- IRQD_MSI_NOMASK_QUIRK = (1 << 27),
+- IRQD_HANDLE_ENFORCE_IRQCTX = (1 << 28),
+- IRQD_AFFINITY_ON_ACTIVATE = (1 << 29),
+- IRQD_IRQ_ENABLED_ON_SUSPEND = (1 << 30),
++ IRQD_HANDLE_ENFORCE_IRQCTX = (1 << 27),
++ IRQD_AFFINITY_ON_ACTIVATE = (1 << 28),
++ IRQD_IRQ_ENABLED_ON_SUSPEND = (1 << 29),
+ };
+
+ #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
+@@ -423,21 +420,6 @@ static inline bool irqd_can_reserve(struct irq_data *d)
+ return __irqd_to_state(d) & IRQD_CAN_RESERVE;
+ }
+
+-static inline void irqd_set_msi_nomask_quirk(struct irq_data *d)
+-{
+- __irqd_to_state(d) |= IRQD_MSI_NOMASK_QUIRK;
+-}
+-
+-static inline void irqd_clr_msi_nomask_quirk(struct irq_data *d)
+-{
+- __irqd_to_state(d) &= ~IRQD_MSI_NOMASK_QUIRK;
+-}
+-
+-static inline bool irqd_msi_nomask_quirk(struct irq_data *d)
+-{
+- return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK;
+-}
+-
+ static inline void irqd_set_affinity_on_activate(struct irq_data *d)
+ {
+ __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE;
+diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
+index 8fdd3cf971a30..8f918f9a1228d 100644
+--- a/include/linux/mmc/host.h
++++ b/include/linux/mmc/host.h
+@@ -450,6 +450,7 @@ struct mmc_host {
+ unsigned int retune_paused:1; /* re-tuning is temporarily disabled */
+ unsigned int retune_crc_disable:1; /* don't trigger retune upon crc */
+ unsigned int can_dma_map_merge:1; /* merging can be used */
++ unsigned int vqmmc_enabled:1; /* vqmmc regulator is enabled */
+
+ int rescan_disable; /* disable card detection */
+ int rescan_entered; /* used with nonremovable devices */
+@@ -597,6 +598,8 @@ static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc,
+ #endif
+
+ int mmc_regulator_get_supply(struct mmc_host *mmc);
++int mmc_regulator_enable_vqmmc(struct mmc_host *mmc);
++void mmc_regulator_disable_vqmmc(struct mmc_host *mmc);
+
+ static inline int mmc_card_is_removable(struct mmc_host *host)
+ {
+diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
+index fbf8c0d95968e..877395e075afe 100644
+--- a/include/linux/spi/spi.h
++++ b/include/linux/spi/spi.h
+@@ -531,6 +531,7 @@ struct spi_controller {
+ #define SPI_CONTROLLER_MUST_TX BIT(4) /* Requires tx */
+
+ #define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */
++#define SPI_CONTROLLER_SUSPENDED BIT(6) /* Currently suspended */
+
+ /* Flag indicating if the allocation of this struct is devres-managed */
+ bool devm_allocated;
+diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
+index 5aabc36fb249b..fdc31fdb612da 100644
+--- a/include/scsi/scsi_device.h
++++ b/include/scsi/scsi_device.h
+@@ -167,19 +167,25 @@ struct scsi_device {
+ * power state for system suspend/resume (suspend to RAM and
+ * hibernation) operations.
+ */
+- bool manage_system_start_stop;
++ unsigned manage_system_start_stop:1;
+
+ /*
+ * If true, let the high-level device driver (sd) manage the device
+ * power state for runtime device suspend and resume operations.
+ */
+- bool manage_runtime_start_stop;
++ unsigned manage_runtime_start_stop:1;
+
+ /*
+ * If true, let the high-level device driver (sd) manage the device
+ * power state for system shutdown (power off) operations.
+ */
+- bool manage_shutdown;
++ unsigned manage_shutdown:1;
++
++ /*
++ * If set and if the device is runtime suspended, ask the high-level
++ * device driver (sd) to force a runtime resume of the device.
++ */
++ unsigned force_runtime_start_on_system_start:1;
+
+ unsigned removable:1;
+ unsigned changed:1; /* Data invalid due to media change */
+diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h
+index 7837ba4fe7289..dcd50fb2164a1 100644
+--- a/include/uapi/linux/stddef.h
++++ b/include/uapi/linux/stddef.h
+@@ -27,7 +27,7 @@
+ union { \
+ struct { MEMBERS } ATTRS; \
+ struct TAG { MEMBERS } ATTRS NAME; \
+- }
++ } ATTRS
+
+ /**
+ * __DECLARE_FLEX_ARRAY() - Declare a flexible array usable in a union
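The stddef.h one-liner makes ATTRS apply to the wrapping anonymous union itself, not only to the two mirrored structs inside it, so an attribute passed to the group reaches every view of the members. A hedged usage example via struct_group_attr(), which expands through __struct_group() (struct pkt is hypothetical):

struct pkt {
	u8 op;
	struct_group_attr(hdr, __packed,	/* header copyable as one object */
		__le16 len;
		__le32 seq;
	);
	u8 payload[];
} __packed;	/* __packed now also lands on the union wrapping 'hdr' */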
+diff --git a/include/xen/events.h b/include/xen/events.h
+index 344081e71584b..b303bd24e2a6c 100644
+--- a/include/xen/events.h
++++ b/include/xen/events.h
+@@ -106,8 +106,7 @@ int irq_from_virq(unsigned int cpu, unsigned int virq);
+ evtchn_port_t evtchn_from_irq(unsigned irq);
+
+ int xen_set_callback_via(uint64_t via);
+-void xen_evtchn_do_upcall(struct pt_regs *regs);
+-void xen_hvm_evtchn_do_upcall(void);
++int xen_evtchn_do_upcall(void);
+
+ /* Bind a pirq for a physical interrupt to an irq. */
+ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
+diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c
+index bbcaac64038ef..0b78fac882b2b 100644
+--- a/kernel/irq/debugfs.c
++++ b/kernel/irq/debugfs.c
+@@ -121,7 +121,6 @@ static const struct irq_bit_descr irqdata_states[] = {
+ BIT_MASK_DESCR(IRQD_AFFINITY_ON_ACTIVATE),
+ BIT_MASK_DESCR(IRQD_MANAGED_SHUTDOWN),
+ BIT_MASK_DESCR(IRQD_CAN_RESERVE),
+- BIT_MASK_DESCR(IRQD_MSI_NOMASK_QUIRK),
+
+ BIT_MASK_DESCR(IRQD_FORWARDED_TO_VCPU),
+
+diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
+index 77e513e2e5da7..4e462b5f7bbe8 100644
+--- a/kernel/irq/msi.c
++++ b/kernel/irq/msi.c
+@@ -807,7 +807,6 @@ static int msi_handle_pci_fail(struct irq_domain *domain, struct msi_desc *desc,
+
+ #define VIRQ_CAN_RESERVE 0x01
+ #define VIRQ_ACTIVATE 0x02
+-#define VIRQ_NOMASK_QUIRK 0x04
+
+ static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflags)
+ {
+@@ -816,8 +815,6 @@ static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflag
+
+ if (!(vflags & VIRQ_CAN_RESERVE)) {
+ irqd_clr_can_reserve(irqd);
+- if (vflags & VIRQ_NOMASK_QUIRK)
+- irqd_set_msi_nomask_quirk(irqd);
+
+ /*
+ * If the interrupt is managed but no CPU is available to
+@@ -877,15 +874,8 @@ int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
+ * Interrupt can use a reserved vector and will not occupy
+ * a real device vector until the interrupt is requested.
+ */
+- if (msi_check_reservation_mode(domain, info, dev)) {
++ if (msi_check_reservation_mode(domain, info, dev))
+ vflags |= VIRQ_CAN_RESERVE;
+- /*
+- * MSI affinity setting requires a special quirk (X86) when
+- * reservation mode is active.
+- */
+- if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK)
+- vflags |= VIRQ_NOMASK_QUIRK;
+- }
+
+ msi_for_each_desc(desc, dev, MSI_DESC_NOTASSOCIATED) {
+ ops->set_desc(&arg, desc);
+diff --git a/lib/errname.c b/lib/errname.c
+index 67739b174a8cc..0c336b0f12f60 100644
+--- a/lib/errname.c
++++ b/lib/errname.c
+@@ -111,9 +111,6 @@ static const char *names_0[] = {
+ E(ENOSPC),
+ E(ENOSR),
+ E(ENOSTR),
+-#ifdef ENOSYM
+- E(ENOSYM),
+-#endif
+ E(ENOSYS),
+ E(ENOTBLK),
+ E(ENOTCONN),
+@@ -144,9 +141,6 @@ static const char *names_0[] = {
+ #endif
+ E(EREMOTE),
+ E(EREMOTEIO),
+-#ifdef EREMOTERELEASE
+- E(EREMOTERELEASE),
+-#endif
+ E(ERESTART),
+ E(ERFKILL),
+ E(EROFS),
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index cbc4816ed7d83..ac53ef7eec915 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -216,8 +216,10 @@ static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
+ int tv = prandom_u32_max(max_delay);
+
+ im->tm_running = 1;
+- if (!mod_timer(&im->timer, jiffies+tv+2))
+- refcount_inc(&im->refcnt);
++ if (refcount_inc_not_zero(&im->refcnt)) {
++ if (mod_timer(&im->timer, jiffies + tv + 2))
++ ip_ma_put(im);
++ }
+ }
+
+ static void igmp_gq_start_timer(struct in_device *in_dev)
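The igmp_start_timer() fix inverts the old order: take a reference first, and only if the object is still live, then arm the timer; a nonzero mod_timer() return means the timer was already pending and owns its own reference, so the extra one is dropped. The idiom in isolation (obj and put_obj are hypothetical):

	if (refcount_inc_not_zero(&obj->refcnt)) {
		if (mod_timer(&obj->timer, jiffies + delay))
			put_obj(obj);	/* timer was pending: it already held a ref */
	}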
+diff --git a/net/wireless/core.h b/net/wireless/core.h
+index e1accacc6f233..ee980965a7cfb 100644
+--- a/net/wireless/core.h
++++ b/net/wireless/core.h
+@@ -297,6 +297,7 @@ struct cfg80211_cqm_config {
+ u32 rssi_hyst;
+ s32 last_rssi_event_value;
+ enum nl80211_cqm_rssi_threshold_event last_rssi_event_type;
++ bool use_range_api;
+ int n_rssi_thresholds;
+ s32 rssi_thresholds[];
+ };
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index b19b5acfaf3a9..42c858219b341 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -12574,10 +12574,6 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
+ int i, n, low_index;
+ int err;
+
+- /* RSSI reporting disabled? */
+- if (!cqm_config)
+- return rdev_set_cqm_rssi_range_config(rdev, dev, 0, 0);
+-
+ /*
+ * Obtain current RSSI value if possible, if not and no RSSI threshold
+ * event has been received yet, we should receive an event after a
+@@ -12652,18 +12648,6 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
+ wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)
+ return -EOPNOTSUPP;
+
+- if (n_thresholds <= 1 && rdev->ops->set_cqm_rssi_config) {
+- if (n_thresholds == 0 || thresholds[0] == 0) /* Disabling */
+- return rdev_set_cqm_rssi_config(rdev, dev, 0, 0);
+-
+- return rdev_set_cqm_rssi_config(rdev, dev,
+- thresholds[0], hysteresis);
+- }
+-
+- if (!wiphy_ext_feature_isset(&rdev->wiphy,
+- NL80211_EXT_FEATURE_CQM_RSSI_LIST))
+- return -EOPNOTSUPP;
+-
+ if (n_thresholds == 1 && thresholds[0] == 0) /* Disabling */
+ n_thresholds = 0;
+
+@@ -12671,6 +12655,20 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
+ old = rcu_dereference_protected(wdev->cqm_config,
+ lockdep_is_held(&wdev->mtx));
+
++ /* if already disabled just succeed */
++ if (!n_thresholds && !old)
++ return 0;
++
++ if (n_thresholds > 1) {
++ if (!wiphy_ext_feature_isset(&rdev->wiphy,
++ NL80211_EXT_FEATURE_CQM_RSSI_LIST) ||
++ !rdev->ops->set_cqm_rssi_range_config)
++ return -EOPNOTSUPP;
++ } else {
++ if (!rdev->ops->set_cqm_rssi_config)
++ return -EOPNOTSUPP;
++ }
++
+ if (n_thresholds) {
+ cqm_config = kzalloc(struct_size(cqm_config, rssi_thresholds,
+ n_thresholds),
+@@ -12685,13 +12683,26 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
+ memcpy(cqm_config->rssi_thresholds, thresholds,
+ flex_array_size(cqm_config, rssi_thresholds,
+ n_thresholds));
++ cqm_config->use_range_api = n_thresholds > 1 ||
++ !rdev->ops->set_cqm_rssi_config;
+
+ rcu_assign_pointer(wdev->cqm_config, cqm_config);
++
++ if (cqm_config->use_range_api)
++ err = cfg80211_cqm_rssi_update(rdev, dev, cqm_config);
++ else
++ err = rdev_set_cqm_rssi_config(rdev, dev,
++ thresholds[0],
++ hysteresis);
+ } else {
+ RCU_INIT_POINTER(wdev->cqm_config, NULL);
++ /* if enabled via the range API, also disable via the range API */
++ if (old->use_range_api)
++ err = rdev_set_cqm_rssi_range_config(rdev, dev, 0, 0);
++ else
++ err = rdev_set_cqm_rssi_config(rdev, dev, 0, 0);
+ }
+
+- err = cfg80211_cqm_rssi_update(rdev, dev, cqm_config);
+ if (err) {
+ rcu_assign_pointer(wdev->cqm_config, old);
+ kfree_rcu(cqm_config, rcu_head);
+@@ -18758,10 +18769,11 @@ void cfg80211_cqm_rssi_notify_work(struct wiphy *wiphy, struct wiphy_work *work)
+ wdev_lock(wdev);
+ cqm_config = rcu_dereference_protected(wdev->cqm_config,
+ lockdep_is_held(&wdev->mtx));
+- if (!wdev->cqm_config)
++ if (!cqm_config)
+ goto unlock;
+
+- cfg80211_cqm_rssi_update(rdev, wdev->netdev, cqm_config);
++ if (cqm_config->use_range_api)
++ cfg80211_cqm_rssi_update(rdev, wdev->netdev, cqm_config);
+
+ rssi_level = cqm_config->last_rssi_event_value;
+ rssi_event = cqm_config->last_rssi_event_type;
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 54f4b593a1158..5aaf3dcecf27e 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2215,6 +2215,8 @@ static const struct snd_pci_quirk power_save_denylist[] = {
+ SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
+ /* https://bugs.launchpad.net/bugs/1821663 */
+ SND_PCI_QUIRK(0x1631, 0xe017, "Packard Bell NEC IMEDIA 5204", 0),
++ /* KONTRON SinglePC may cause a stall at runtime resume */
++ SND_PCI_QUIRK(0x1734, 0x1232, "KONTRON SinglePC", 0),
+ {}
+ };
+ #endif /* CONFIG_PM */
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index b63e12b661996..d1944c83b03a2 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -1986,6 +1986,7 @@ enum {
+ ALC887_FIXUP_ASUS_AUDIO,
+ ALC887_FIXUP_ASUS_HMIC,
+ ALCS1200A_FIXUP_MIC_VREF,
++ ALC888VD_FIXUP_MIC_100VREF,
+ };
+
+ static void alc889_fixup_coef(struct hda_codec *codec,
+@@ -2539,6 +2540,13 @@ static const struct hda_fixup alc882_fixups[] = {
+ {}
+ }
+ },
++ [ALC888VD_FIXUP_MIC_100VREF] = {
++ .type = HDA_FIXUP_PINCTLS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x18, PIN_VREF100 }, /* headset mic */
++ {}
++ }
++ },
+ };
+
+ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+@@ -2608,6 +2616,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF),
+
+ SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
++ SND_PCI_QUIRK(0x10ec, 0x12d8, "iBase Elo Touch", ALC888VD_FIXUP_MIC_100VREF),
+ SND_PCI_QUIRK(0x13fe, 0x1009, "Advantech MIT-W101", ALC886_FIXUP_EAPD),
+ SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
+@@ -3255,6 +3264,7 @@ static void alc_disable_headset_jack_key(struct hda_codec *codec)
+ case 0x10ec0230:
+ case 0x10ec0236:
+ case 0x10ec0256:
++ case 0x10ec0257:
+ case 0x19e58326:
+ alc_write_coef_idx(codec, 0x48, 0x0);
+ alc_update_coef_idx(codec, 0x49, 0x0045, 0x0);
+@@ -3284,6 +3294,7 @@ static void alc_enable_headset_jack_key(struct hda_codec *codec)
+ case 0x10ec0230:
+ case 0x10ec0236:
+ case 0x10ec0256:
++ case 0x10ec0257:
+ case 0x19e58326:
+ alc_write_coef_idx(codec, 0x48, 0xd011);
+ alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045);
+@@ -6495,6 +6506,7 @@ static void alc_combo_jack_hp_jd_restart(struct hda_codec *codec)
+ case 0x10ec0236:
+ case 0x10ec0255:
+ case 0x10ec0256:
++ case 0x10ec0257:
+ case 0x19e58326:
+ alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */
+ alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15);
+diff --git a/tools/arch/parisc/include/uapi/asm/errno.h b/tools/arch/parisc/include/uapi/asm/errno.h
+index 87245c584784e..8d94739d75c67 100644
+--- a/tools/arch/parisc/include/uapi/asm/errno.h
++++ b/tools/arch/parisc/include/uapi/asm/errno.h
+@@ -75,7 +75,6 @@
+
+ /* We now return you to your regularly scheduled HPUX. */
+
+-#define ENOSYM 215 /* symbol does not exist in executable */
+ #define ENOTSOCK 216 /* Socket operation on non-socket */
+ #define EDESTADDRREQ 217 /* Destination address required */
+ #define EMSGSIZE 218 /* Message too long */
+@@ -101,7 +100,6 @@
+ #define ETIMEDOUT 238 /* Connection timed out */
+ #define ECONNREFUSED 239 /* Connection refused */
+ #define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
+-#define EREMOTERELEASE 240 /* Remote peer released connection */
+ #define EHOSTDOWN 241 /* Host is down */
+ #define EHOSTUNREACH 242 /* No route to host */
+
+diff --git a/tools/testing/selftests/net/af_unix/diag_uid.c b/tools/testing/selftests/net/af_unix/diag_uid.c
+index 5b88f7129fea4..79a3dd75590e8 100644
+--- a/tools/testing/selftests/net/af_unix/diag_uid.c
++++ b/tools/testing/selftests/net/af_unix/diag_uid.c
+@@ -148,7 +148,6 @@ void receive_response(struct __test_metadata *_metadata,
+ .msg_iov = &iov,
+ .msg_iovlen = 1
+ };
+- struct unix_diag_req *udr;
+ struct nlmsghdr *nlh;
+ int ret;
+
+diff --git a/tools/testing/selftests/net/cmsg_sender.c b/tools/testing/selftests/net/cmsg_sender.c
+index 24b21b15ed3fb..6ff3e732f449f 100644
+--- a/tools/testing/selftests/net/cmsg_sender.c
++++ b/tools/testing/selftests/net/cmsg_sender.c
+@@ -416,9 +416,9 @@ int main(int argc, char *argv[])
+ {
+ struct addrinfo hints, *ai;
+ struct iovec iov[1];
++ unsigned char *buf;
+ struct msghdr msg;
+ char cbuf[1024];
+- char *buf;
+ int err;
+ int fd;
+
+diff --git a/tools/testing/selftests/net/ipsec.c b/tools/testing/selftests/net/ipsec.c
+index 9a8229abfa026..be4a30a0d02ae 100644
+--- a/tools/testing/selftests/net/ipsec.c
++++ b/tools/testing/selftests/net/ipsec.c
+@@ -2263,7 +2263,7 @@ static int check_results(void)
+
+ int main(int argc, char **argv)
+ {
+- unsigned int nr_process = 1;
++ long nr_process = 1;
+ int route_sock = -1, ret = KSFT_SKIP;
+ int test_desc_fd[2];
+ uint32_t route_seq;
+@@ -2284,7 +2284,7 @@ int main(int argc, char **argv)
+ exit_usage(argv);
+ }
+
+- if (nr_process > MAX_PROCESSES || !nr_process) {
++ if (nr_process > MAX_PROCESSES || nr_process < 1) {
+ printk("nr_process should be between [1; %u]",
+ MAX_PROCESSES);
+ exit_usage(argv);
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
+index 7df6b9b6f9a84..e6b514cb7bdda 100644
+--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
+@@ -18,6 +18,7 @@
+
+ #include <sys/ioctl.h>
+ #include <sys/poll.h>
++#include <sys/random.h>
+ #include <sys/sendfile.h>
+ #include <sys/stat.h>
+ #include <sys/socket.h>
+@@ -1050,15 +1051,11 @@ again:
+
+ static void init_rng(void)
+ {
+- int fd = open("/dev/urandom", O_RDONLY);
+ unsigned int foo;
+
+- if (fd > 0) {
+- int ret = read(fd, &foo, sizeof(foo));
+-
+- if (ret < 0)
+- srand(fd + foo);
+- close(fd);
++ if (getrandom(&foo, sizeof(foo), 0) == -1) {
++ perror("getrandom");
++ exit(1);
+ }
+
+ srand(foo);
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_inq.c b/tools/testing/selftests/net/mptcp/mptcp_inq.c
+index 8672d898f8cda..218aac4673212 100644
+--- a/tools/testing/selftests/net/mptcp/mptcp_inq.c
++++ b/tools/testing/selftests/net/mptcp/mptcp_inq.c
+@@ -18,6 +18,7 @@
+ #include <time.h>
+
+ #include <sys/ioctl.h>
++#include <sys/random.h>
+ #include <sys/socket.h>
+ #include <sys/types.h>
+ #include <sys/wait.h>
+@@ -519,15 +520,11 @@ static int client(int unixfd)
+
+ static void init_rng(void)
+ {
+- int fd = open("/dev/urandom", O_RDONLY);
+ unsigned int foo;
+
+- if (fd > 0) {
+- int ret = read(fd, &foo, sizeof(foo));
+-
+- if (ret < 0)
+- srand(fd + foo);
+- close(fd);
++ if (getrandom(&foo, sizeof(foo), 0) == -1) {
++ perror("getrandom");
++ exit(1);
+ }
+
+ srand(foo);