author     Mike Pagano <mpagano@gentoo.org>    2020-05-02 09:26:35 -0400
committer  Mike Pagano <mpagano@gentoo.org>    2020-05-02 09:26:35 -0400
commit     6325bd142c1dd00cc25073175adc64ba81e7a604 (patch)
tree       9349c2e29040eebcbc13badd11a9e367086951f7
parent     Linux patch 5.6.8 (diff)
download   linux-patches-6325bd14.tar.gz
           linux-patches-6325bd14.tar.bz2
           linux-patches-6325bd14.zip
Linux patch 5.6.9 (5.6-12)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README            |    4
-rw-r--r--  1008_linux-5.6.9.patch | 4807
2 files changed, 4811 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index d756ad3a..8794f80b 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch: 1007_linux-5.6.8.patch
From: http://www.kernel.org
Desc: Linux 5.6.8
+Patch: 1008_linux-5.6.9.patch
+From: http://www.kernel.org
+Desc: Linux 5.6.9
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1008_linux-5.6.9.patch b/1008_linux-5.6.9.patch
new file mode 100644
index 00000000..1ec7c3d2
--- /dev/null
+++ b/1008_linux-5.6.9.patch
@@ -0,0 +1,4807 @@
+diff --git a/Makefile b/Makefile
+index e7101c99d81b..2fc8ba07d930 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 6
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/arm/boot/dts/bcm2835-rpi.dtsi b/arch/arm/boot/dts/bcm2835-rpi.dtsi
+index fd2c766e0f71..f7ae5a4530b8 100644
+--- a/arch/arm/boot/dts/bcm2835-rpi.dtsi
++++ b/arch/arm/boot/dts/bcm2835-rpi.dtsi
+@@ -14,6 +14,9 @@
+ soc {
+ firmware: firmware {
+ compatible = "raspberrypi,bcm2835-firmware", "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <1>;
++
+ mboxes = <&mailbox>;
+ dma-ranges;
+ };
+diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
+index e1abe8c730ce..b83a864e2e8b 100644
+--- a/arch/arm/boot/dts/bcm283x.dtsi
++++ b/arch/arm/boot/dts/bcm283x.dtsi
+@@ -372,6 +372,7 @@
+ "dsi0_ddr2",
+ "dsi0_ddr";
+
++ status = "disabled";
+ };
+
+ aux: aux@7e215000 {
+diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi
+index a075b63f3087..11d41e86f814 100644
+--- a/arch/arm/boot/dts/omap3-n950-n9.dtsi
++++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi
+@@ -341,6 +341,11 @@
+ status = "disabled";
+ };
+
++/* RNG not directly accessible on N950/N9. */
++&rng_target {
++ status = "disabled";
++};
++
+ &usb_otg_hs {
+ interface-type = <0>;
+ usb-phy = <&usb2_phy>;
+diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
+index b91570ff9db1..931037500e83 100644
+--- a/arch/arm64/include/asm/sysreg.h
++++ b/arch/arm64/include/asm/sysreg.h
+@@ -49,7 +49,9 @@
+ #ifndef CONFIG_BROKEN_GAS_INST
+
+ #ifdef __ASSEMBLY__
+-#define __emit_inst(x) .inst (x)
++// The space separator is omitted so that __emit_inst(x) can be parsed as
++// either an assembler directive or an assembler macro argument.
++#define __emit_inst(x) .inst(x)
+ #else
+ #define __emit_inst(x) ".inst " __stringify((x)) "\n\t"
+ #endif
+diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
+index 61f2b0412345..ccba63aaeb47 100644
+--- a/arch/s390/kernel/diag.c
++++ b/arch/s390/kernel/diag.c
+@@ -133,7 +133,7 @@ void diag_stat_inc(enum diag_stat_enum nr)
+ }
+ EXPORT_SYMBOL(diag_stat_inc);
+
+-void diag_stat_inc_norecursion(enum diag_stat_enum nr)
++void notrace diag_stat_inc_norecursion(enum diag_stat_enum nr)
+ {
+ this_cpu_inc(diag_stat.counter[nr]);
+ trace_s390_diagnose_norecursion(diag_map[nr].code);
+diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
+index f87d4e14269c..4f8cb8d1c51b 100644
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -403,7 +403,7 @@ int smp_find_processor_id(u16 address)
+ return -1;
+ }
+
+-bool arch_vcpu_is_preempted(int cpu)
++bool notrace arch_vcpu_is_preempted(int cpu)
+ {
+ if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
+ return false;
+@@ -413,7 +413,7 @@ bool arch_vcpu_is_preempted(int cpu)
+ }
+ EXPORT_SYMBOL(arch_vcpu_is_preempted);
+
+-void smp_yield_cpu(int cpu)
++void notrace smp_yield_cpu(int cpu)
+ {
+ if (!MACHINE_HAS_DIAG9C)
+ return;
+diff --git a/arch/s390/kernel/trace.c b/arch/s390/kernel/trace.c
+index 490b52e85014..11a669f3cc93 100644
+--- a/arch/s390/kernel/trace.c
++++ b/arch/s390/kernel/trace.c
+@@ -14,7 +14,7 @@ EXPORT_TRACEPOINT_SYMBOL(s390_diagnose);
+
+ static DEFINE_PER_CPU(unsigned int, diagnose_trace_depth);
+
+-void trace_s390_diagnose_norecursion(int diag_nr)
++void notrace trace_s390_diagnose_norecursion(int diag_nr)
+ {
+ unsigned long flags;
+ unsigned int *depth;
+diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c
+index fbe97ab2e228..743f257cf2cb 100644
+--- a/arch/s390/pci/pci_irq.c
++++ b/arch/s390/pci/pci_irq.c
+@@ -115,7 +115,6 @@ static struct irq_chip zpci_irq_chip = {
+ .name = "PCI-MSI",
+ .irq_unmask = pci_msi_unmask_irq,
+ .irq_mask = pci_msi_mask_irq,
+- .irq_set_affinity = zpci_set_irq_affinity,
+ };
+
+ static void zpci_handle_cpu_local_irq(bool rescan)
+@@ -276,7 +275,9 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+ rc = -EIO;
+ if (hwirq - bit >= msi_vecs)
+ break;
+- irq = __irq_alloc_descs(-1, 0, 1, 0, THIS_MODULE, msi->affinity);
++ irq = __irq_alloc_descs(-1, 0, 1, 0, THIS_MODULE,
++ (irq_delivery == DIRECTED) ?
++ msi->affinity : NULL);
+ if (irq < 0)
+ return -ENOMEM;
+ rc = irq_set_msi_desc(irq, msi);
+diff --git a/arch/um/Makefile b/arch/um/Makefile
+index d2daa206872d..275f5ffdf6f0 100644
+--- a/arch/um/Makefile
++++ b/arch/um/Makefile
+@@ -140,6 +140,7 @@ export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE)
+ # When cleaning we don't include .config, so we don't include
+ # TT or skas makefiles and don't clean skas_ptregs.h.
+ CLEAN_FILES += linux x.i gmon.out
++MRPROPER_DIRS += arch/$(SUBARCH)/include/generated
+
+ archclean:
+ @find . \( -name '*.bb' -o -name '*.bbg' -o -name '*.da' \
+diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
+index 5e296a7e6036..ebf34c7bc8bc 100644
+--- a/arch/x86/kernel/cpu/mshyperv.c
++++ b/arch/x86/kernel/cpu/mshyperv.c
+@@ -227,8 +227,8 @@ static void __init ms_hyperv_init_platform(void)
+ ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
+ ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);
+
+- pr_info("Hyper-V: features 0x%x, hints 0x%x\n",
+- ms_hyperv.features, ms_hyperv.hints);
++ pr_info("Hyper-V: features 0x%x, hints 0x%x, misc 0x%x\n",
++ ms_hyperv.features, ms_hyperv.hints, ms_hyperv.misc_features);
+
+ ms_hyperv.max_vp_index = cpuid_eax(HYPERV_CPUID_IMPLEMENT_LIMITS);
+ ms_hyperv.max_lp_index = cpuid_ebx(HYPERV_CPUID_IMPLEMENT_LIMITS);
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index 9ba08e9abc09..6aa53c33b471 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -158,6 +158,19 @@ static bool is_ereg(u32 reg)
+ BIT(BPF_REG_AX));
+ }
+
++/*
++ * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
++ * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
++ * of encoding. al,cl,dl,bl have simpler encoding.
++ */
++static bool is_ereg_8l(u32 reg)
++{
++ return is_ereg(reg) ||
++ (1 << reg) & (BIT(BPF_REG_1) |
++ BIT(BPF_REG_2) |
++ BIT(BPF_REG_FP));
++}
++
+ static bool is_axreg(u32 reg)
+ {
+ return reg == BPF_REG_0;
+@@ -598,9 +611,8 @@ static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
+ switch (size) {
+ case BPF_B:
+ /* Emit 'mov byte ptr [rax + off], al' */
+- if (is_ereg(dst_reg) || is_ereg(src_reg) ||
+- /* We have to add extra byte for x86 SIL, DIL regs */
+- src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
++ if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
++ /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
+ EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
+ else
+ EMIT1(0x88);
+diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
+index 4d2a7a764602..66cd150b7e54 100644
+--- a/arch/x86/net/bpf_jit_comp32.c
++++ b/arch/x86/net/bpf_jit_comp32.c
+@@ -1847,14 +1847,16 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
+ case BPF_B:
+ case BPF_H:
+ case BPF_W:
+- if (!bpf_prog->aux->verifier_zext)
++ if (bpf_prog->aux->verifier_zext)
+ break;
+ if (dstk) {
+ EMIT3(0xC7, add_1reg(0x40, IA32_EBP),
+ STACK_VAR(dst_hi));
+ EMIT(0x0, 4);
+ } else {
+- EMIT3(0xC7, add_1reg(0xC0, dst_hi), 0);
++ /* xor dst_hi,dst_hi */
++ EMIT2(0x33,
++ add_2reg(0xC0, dst_hi, dst_hi));
+ }
+ break;
+ case BPF_DW:
+@@ -2013,8 +2015,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
+ case BPF_JMP | BPF_JSET | BPF_X:
+ case BPF_JMP32 | BPF_JSET | BPF_X: {
+ bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
+- u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
+- u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
++ u8 dreg_lo = IA32_EAX;
++ u8 dreg_hi = IA32_EDX;
+ u8 sreg_lo = sstk ? IA32_ECX : src_lo;
+ u8 sreg_hi = sstk ? IA32_EBX : src_hi;
+
+@@ -2026,6 +2028,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
+ add_2reg(0x40, IA32_EBP,
+ IA32_EDX),
+ STACK_VAR(dst_hi));
++ } else {
++ /* mov dreg_lo,dst_lo */
++ EMIT2(0x89, add_2reg(0xC0, dreg_lo, dst_lo));
++ if (is_jmp64)
++ /* mov dreg_hi,dst_hi */
++ EMIT2(0x89,
++ add_2reg(0xC0, dreg_hi, dst_hi));
+ }
+
+ if (sstk) {
+@@ -2050,8 +2059,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
+ case BPF_JMP | BPF_JSET | BPF_K:
+ case BPF_JMP32 | BPF_JSET | BPF_K: {
+ bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
+- u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
+- u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
++ u8 dreg_lo = IA32_EAX;
++ u8 dreg_hi = IA32_EDX;
+ u8 sreg_lo = IA32_ECX;
+ u8 sreg_hi = IA32_EBX;
+ u32 hi;
+@@ -2064,6 +2073,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
+ add_2reg(0x40, IA32_EBP,
+ IA32_EDX),
+ STACK_VAR(dst_hi));
++ } else {
++ /* mov dreg_lo,dst_lo */
++ EMIT2(0x89, add_2reg(0xC0, dreg_lo, dst_lo));
++ if (is_jmp64)
++ /* mov dreg_hi,dst_hi */
++ EMIT2(0x89,
++ add_2reg(0xC0, dreg_hi, dst_hi));
+ }
+
+ /* mov ecx,imm32 */
+diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
+index a47294063882..a20914b38e6a 100644
+--- a/arch/x86/platform/efi/efi_64.c
++++ b/arch/x86/platform/efi/efi_64.c
+@@ -202,7 +202,7 @@ virt_to_phys_or_null_size(void *va, unsigned long size)
+
+ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
+ {
+- unsigned long pfn, text, pf;
++ unsigned long pfn, text, pf, rodata;
+ struct page *page;
+ unsigned npages;
+ pgd_t *pgd = efi_mm.pgd;
+@@ -256,7 +256,7 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
+
+ efi_scratch.phys_stack = page_to_phys(page + 1); /* stack grows down */
+
+- npages = (__end_rodata_aligned - _text) >> PAGE_SHIFT;
++ npages = (_etext - _text) >> PAGE_SHIFT;
+ text = __pa(_text);
+ pfn = text >> PAGE_SHIFT;
+
+@@ -266,6 +266,14 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
+ return 1;
+ }
+
++ npages = (__end_rodata - __start_rodata) >> PAGE_SHIFT;
++ rodata = __pa(__start_rodata);
++ pfn = rodata >> PAGE_SHIFT;
++ if (kernel_map_pages_in_pgd(pgd, pfn, rodata, npages, pf)) {
++ pr_err("Failed to map kernel rodata 1:1\n");
++ return 1;
++ }
++
+ return 0;
+ }
+
+diff --git a/block/blk-iocost.c b/block/blk-iocost.c
+index 9a599cc28c29..2dc5dc54e257 100644
+--- a/block/blk-iocost.c
++++ b/block/blk-iocost.c
+@@ -1594,7 +1594,7 @@ skip_surplus_transfers:
+ vrate_min, vrate_max);
+ }
+
+- trace_iocost_ioc_vrate_adj(ioc, vrate, &missed_ppm, rq_wait_pct,
++ trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
+ nr_lagging, nr_shortages,
+ nr_surpluses);
+
+@@ -1603,7 +1603,7 @@ skip_surplus_transfers:
+ ioc->period_us * vrate * INUSE_MARGIN_PCT, 100);
+ } else if (ioc->busy_level != prev_busy_level || nr_lagging) {
+ trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
+- &missed_ppm, rq_wait_pct, nr_lagging,
++ missed_ppm, rq_wait_pct, nr_lagging,
+ nr_shortages, nr_surpluses);
+ }
+
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 37ff8dfb8ab9..2c3a1b2e0753 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1205,8 +1205,10 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
+ rq = list_first_entry(list, struct request, queuelist);
+
+ hctx = rq->mq_hctx;
+- if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
++ if (!got_budget && !blk_mq_get_dispatch_budget(hctx)) {
++ blk_mq_put_driver_tag(rq);
+ break;
++ }
+
+ if (!blk_mq_get_driver_tag(rq)) {
+ /*
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index 0e99a760aebd..8646147dc194 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -726,7 +726,7 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
+
+ if (is_async(dev)) {
+ get_device(dev);
+- async_schedule(func, dev);
++ async_schedule_dev(func, dev);
+ return true;
+ }
+
+diff --git a/drivers/clk/clk-asm9260.c b/drivers/clk/clk-asm9260.c
+index 536b59aabd2c..bacebd457e6f 100644
+--- a/drivers/clk/clk-asm9260.c
++++ b/drivers/clk/clk-asm9260.c
+@@ -276,7 +276,7 @@ static void __init asm9260_acc_init(struct device_node *np)
+
+ /* TODO: Convert to DT parent scheme */
+ ref_clk = of_clk_get_parent_name(np, 0);
+- hw = __clk_hw_register_fixed_rate_with_accuracy(NULL, NULL, pll_clk,
++ hw = __clk_hw_register_fixed_rate(NULL, NULL, pll_clk,
+ ref_clk, NULL, NULL, 0, rate, 0,
+ CLK_FIXED_RATE_PARENT_ACCURACY);
+
+diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
+index 17e67a84777d..dd0a57f80988 100644
+--- a/drivers/counter/104-quad-8.c
++++ b/drivers/counter/104-quad-8.c
+@@ -42,6 +42,7 @@ MODULE_PARM_DESC(base, "ACCES 104-QUAD-8 base addresses");
+ * @base: base port address of the IIO device
+ */
+ struct quad8_iio {
++ struct mutex lock;
+ struct counter_device counter;
+ unsigned int preset[QUAD8_NUM_COUNTERS];
+ unsigned int count_mode[QUAD8_NUM_COUNTERS];
+@@ -116,6 +117,8 @@ static int quad8_read_raw(struct iio_dev *indio_dev,
+ /* Borrow XOR Carry effectively doubles count range */
+ *val = (borrow ^ carry) << 24;
+
++ mutex_lock(&priv->lock);
++
+ /* Reset Byte Pointer; transfer Counter to Output Latch */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT,
+ base_offset + 1);
+@@ -123,6 +126,8 @@ static int quad8_read_raw(struct iio_dev *indio_dev,
+ for (i = 0; i < 3; i++)
+ *val |= (unsigned int)inb(base_offset) << (8 * i);
+
++ mutex_unlock(&priv->lock);
++
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_ENABLE:
+ *val = priv->ab_enable[chan->channel];
+@@ -153,6 +158,8 @@ static int quad8_write_raw(struct iio_dev *indio_dev,
+ if ((unsigned int)val > 0xFFFFFF)
+ return -EINVAL;
+
++ mutex_lock(&priv->lock);
++
+ /* Reset Byte Pointer */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
+
+@@ -176,12 +183,16 @@ static int quad8_write_raw(struct iio_dev *indio_dev,
+ /* Reset Error flag */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1);
+
++ mutex_unlock(&priv->lock);
++
+ return 0;
+ case IIO_CHAN_INFO_ENABLE:
+ /* only boolean values accepted */
+ if (val < 0 || val > 1)
+ return -EINVAL;
+
++ mutex_lock(&priv->lock);
++
+ priv->ab_enable[chan->channel] = val;
+
+ ior_cfg = val | priv->preset_enable[chan->channel] << 1;
+@@ -189,11 +200,18 @@ static int quad8_write_raw(struct iio_dev *indio_dev,
+ /* Load I/O control configuration */
+ outb(QUAD8_CTR_IOR | ior_cfg, base_offset + 1);
+
++ mutex_unlock(&priv->lock);
++
+ return 0;
+ case IIO_CHAN_INFO_SCALE:
++ mutex_lock(&priv->lock);
++
+ /* Quadrature scaling only available in quadrature mode */
+- if (!priv->quadrature_mode[chan->channel] && (val2 || val != 1))
++ if (!priv->quadrature_mode[chan->channel] &&
++ (val2 || val != 1)) {
++ mutex_unlock(&priv->lock);
+ return -EINVAL;
++ }
+
+ /* Only three gain states (1, 0.5, 0.25) */
+ if (val == 1 && !val2)
+@@ -207,11 +225,15 @@ static int quad8_write_raw(struct iio_dev *indio_dev,
+ priv->quadrature_scale[chan->channel] = 2;
+ break;
+ default:
++ mutex_unlock(&priv->lock);
+ return -EINVAL;
+ }
+- else
++ else {
++ mutex_unlock(&priv->lock);
+ return -EINVAL;
++ }
+
++ mutex_unlock(&priv->lock);
+ return 0;
+ }
+
+@@ -248,6 +270,8 @@ static ssize_t quad8_write_preset(struct iio_dev *indio_dev, uintptr_t private,
+ if (preset > 0xFFFFFF)
+ return -EINVAL;
+
++ mutex_lock(&priv->lock);
++
+ priv->preset[chan->channel] = preset;
+
+ /* Reset Byte Pointer */
+@@ -257,6 +281,8 @@ static ssize_t quad8_write_preset(struct iio_dev *indio_dev, uintptr_t private,
+ for (i = 0; i < 3; i++)
+ outb(preset >> (8 * i), base_offset);
+
++ mutex_unlock(&priv->lock);
++
+ return len;
+ }
+
+@@ -286,6 +312,8 @@ static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev,
+ /* Preset enable is active low in Input/Output Control register */
+ preset_enable = !preset_enable;
+
++ mutex_lock(&priv->lock);
++
+ priv->preset_enable[chan->channel] = preset_enable;
+
+ ior_cfg = priv->ab_enable[chan->channel] |
+@@ -294,6 +322,8 @@ static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev,
+ /* Load I/O control configuration to Input / Output Control Register */
+ outb(QUAD8_CTR_IOR | ior_cfg, base_offset);
+
++ mutex_unlock(&priv->lock);
++
+ return len;
+ }
+
+@@ -351,6 +381,8 @@ static int quad8_set_count_mode(struct iio_dev *indio_dev,
+ unsigned int mode_cfg = cnt_mode << 1;
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+
++ mutex_lock(&priv->lock);
++
+ priv->count_mode[chan->channel] = cnt_mode;
+
+ /* Add quadrature mode configuration */
+@@ -360,6 +392,8 @@ static int quad8_set_count_mode(struct iio_dev *indio_dev,
+ /* Load mode configuration to Counter Mode Register */
+ outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
+
++ mutex_unlock(&priv->lock);
++
+ return 0;
+ }
+
+@@ -387,19 +421,26 @@ static int quad8_set_synchronous_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int synchronous_mode)
+ {
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+- const unsigned int idr_cfg = synchronous_mode |
+- priv->index_polarity[chan->channel] << 1;
+ const int base_offset = priv->base + 2 * chan->channel + 1;
++ unsigned int idr_cfg = synchronous_mode;
++
++ mutex_lock(&priv->lock);
++
++ idr_cfg |= priv->index_polarity[chan->channel] << 1;
+
+ /* Index function must be non-synchronous in non-quadrature mode */
+- if (synchronous_mode && !priv->quadrature_mode[chan->channel])
++ if (synchronous_mode && !priv->quadrature_mode[chan->channel]) {
++ mutex_unlock(&priv->lock);
+ return -EINVAL;
++ }
+
+ priv->synchronous_mode[chan->channel] = synchronous_mode;
+
+ /* Load Index Control configuration to Index Control Register */
+ outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
+
++ mutex_unlock(&priv->lock);
++
+ return 0;
+ }
+
+@@ -427,8 +468,12 @@ static int quad8_set_quadrature_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int quadrature_mode)
+ {
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+- unsigned int mode_cfg = priv->count_mode[chan->channel] << 1;
+ const int base_offset = priv->base + 2 * chan->channel + 1;
++ unsigned int mode_cfg;
++
++ mutex_lock(&priv->lock);
++
++ mode_cfg = priv->count_mode[chan->channel] << 1;
+
+ if (quadrature_mode)
+ mode_cfg |= (priv->quadrature_scale[chan->channel] + 1) << 3;
+@@ -446,6 +491,8 @@ static int quad8_set_quadrature_mode(struct iio_dev *indio_dev,
+ /* Load mode configuration to Counter Mode Register */
+ outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
+
++ mutex_unlock(&priv->lock);
++
+ return 0;
+ }
+
+@@ -473,15 +520,20 @@ static int quad8_set_index_polarity(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int index_polarity)
+ {
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+- const unsigned int idr_cfg = priv->synchronous_mode[chan->channel] |
+- index_polarity << 1;
+ const int base_offset = priv->base + 2 * chan->channel + 1;
++ unsigned int idr_cfg = index_polarity << 1;
++
++ mutex_lock(&priv->lock);
++
++ idr_cfg |= priv->synchronous_mode[chan->channel];
+
+ priv->index_polarity[chan->channel] = index_polarity;
+
+ /* Load Index Control configuration to Index Control Register */
+ outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
+
++ mutex_unlock(&priv->lock);
++
+ return 0;
+ }
+
+@@ -582,7 +634,7 @@ static int quad8_signal_read(struct counter_device *counter,
+ static int quad8_count_read(struct counter_device *counter,
+ struct counter_count *count, unsigned long *val)
+ {
+- const struct quad8_iio *const priv = counter->priv;
++ struct quad8_iio *const priv = counter->priv;
+ const int base_offset = priv->base + 2 * count->id;
+ unsigned int flags;
+ unsigned int borrow;
+@@ -596,6 +648,8 @@ static int quad8_count_read(struct counter_device *counter,
+ /* Borrow XOR Carry effectively doubles count range */
+ *val = (unsigned long)(borrow ^ carry) << 24;
+
++ mutex_lock(&priv->lock);
++
+ /* Reset Byte Pointer; transfer Counter to Output Latch */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT,
+ base_offset + 1);
+@@ -603,13 +657,15 @@ static int quad8_count_read(struct counter_device *counter,
+ for (i = 0; i < 3; i++)
+ *val |= (unsigned long)inb(base_offset) << (8 * i);
+
++ mutex_unlock(&priv->lock);
++
+ return 0;
+ }
+
+ static int quad8_count_write(struct counter_device *counter,
+ struct counter_count *count, unsigned long val)
+ {
+- const struct quad8_iio *const priv = counter->priv;
++ struct quad8_iio *const priv = counter->priv;
+ const int base_offset = priv->base + 2 * count->id;
+ int i;
+
+@@ -617,6 +673,8 @@ static int quad8_count_write(struct counter_device *counter,
+ if (val > 0xFFFFFF)
+ return -EINVAL;
+
++ mutex_lock(&priv->lock);
++
+ /* Reset Byte Pointer */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
+
+@@ -640,6 +698,8 @@ static int quad8_count_write(struct counter_device *counter,
+ /* Reset Error flag */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1);
+
++ mutex_unlock(&priv->lock);
++
+ return 0;
+ }
+
+@@ -660,13 +720,13 @@ static enum counter_count_function quad8_count_functions_list[] = {
+ static int quad8_function_get(struct counter_device *counter,
+ struct counter_count *count, size_t *function)
+ {
+- const struct quad8_iio *const priv = counter->priv;
++ struct quad8_iio *const priv = counter->priv;
+ const int id = count->id;
+- const unsigned int quadrature_mode = priv->quadrature_mode[id];
+- const unsigned int scale = priv->quadrature_scale[id];
+
+- if (quadrature_mode)
+- switch (scale) {
++ mutex_lock(&priv->lock);
++
++ if (priv->quadrature_mode[id])
++ switch (priv->quadrature_scale[id]) {
+ case 0:
+ *function = QUAD8_COUNT_FUNCTION_QUADRATURE_X1;
+ break;
+@@ -680,6 +740,8 @@ static int quad8_function_get(struct counter_device *counter,
+ else
+ *function = QUAD8_COUNT_FUNCTION_PULSE_DIRECTION;
+
++ mutex_unlock(&priv->lock);
++
+ return 0;
+ }
+
+@@ -690,10 +752,15 @@ static int quad8_function_set(struct counter_device *counter,
+ const int id = count->id;
+ unsigned int *const quadrature_mode = priv->quadrature_mode + id;
+ unsigned int *const scale = priv->quadrature_scale + id;
+- unsigned int mode_cfg = priv->count_mode[id] << 1;
+ unsigned int *const synchronous_mode = priv->synchronous_mode + id;
+- const unsigned int idr_cfg = priv->index_polarity[id] << 1;
+ const int base_offset = priv->base + 2 * id + 1;
++ unsigned int mode_cfg;
++ unsigned int idr_cfg;
++
++ mutex_lock(&priv->lock);
++
++ mode_cfg = priv->count_mode[id] << 1;
++ idr_cfg = priv->index_polarity[id] << 1;
+
+ if (function == QUAD8_COUNT_FUNCTION_PULSE_DIRECTION) {
+ *quadrature_mode = 0;
+@@ -729,6 +796,8 @@ static int quad8_function_set(struct counter_device *counter,
+ /* Load mode configuration to Counter Mode Register */
+ outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
+
++ mutex_unlock(&priv->lock);
++
+ return 0;
+ }
+
+@@ -845,15 +914,20 @@ static int quad8_index_polarity_set(struct counter_device *counter,
+ {
+ struct quad8_iio *const priv = counter->priv;
+ const size_t channel_id = signal->id - 16;
+- const unsigned int idr_cfg = priv->synchronous_mode[channel_id] |
+- index_polarity << 1;
+ const int base_offset = priv->base + 2 * channel_id + 1;
++ unsigned int idr_cfg = index_polarity << 1;
++
++ mutex_lock(&priv->lock);
++
++ idr_cfg |= priv->synchronous_mode[channel_id];
+
+ priv->index_polarity[channel_id] = index_polarity;
+
+ /* Load Index Control configuration to Index Control Register */
+ outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
+
++ mutex_unlock(&priv->lock);
++
+ return 0;
+ }
+
+@@ -880,19 +954,26 @@ static int quad8_synchronous_mode_set(struct counter_device *counter,
+ {
+ struct quad8_iio *const priv = counter->priv;
+ const size_t channel_id = signal->id - 16;
+- const unsigned int idr_cfg = synchronous_mode |
+- priv->index_polarity[channel_id] << 1;
+ const int base_offset = priv->base + 2 * channel_id + 1;
++ unsigned int idr_cfg = synchronous_mode;
++
++ mutex_lock(&priv->lock);
++
++ idr_cfg |= priv->index_polarity[channel_id] << 1;
+
+ /* Index function must be non-synchronous in non-quadrature mode */
+- if (synchronous_mode && !priv->quadrature_mode[channel_id])
++ if (synchronous_mode && !priv->quadrature_mode[channel_id]) {
++ mutex_unlock(&priv->lock);
+ return -EINVAL;
++ }
+
+ priv->synchronous_mode[channel_id] = synchronous_mode;
+
+ /* Load Index Control configuration to Index Control Register */
+ outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
+
++ mutex_unlock(&priv->lock);
++
+ return 0;
+ }
+
+@@ -957,6 +1038,8 @@ static int quad8_count_mode_set(struct counter_device *counter,
+ break;
+ }
+
++ mutex_lock(&priv->lock);
++
+ priv->count_mode[count->id] = cnt_mode;
+
+ /* Set count mode configuration value */
+@@ -969,6 +1052,8 @@ static int quad8_count_mode_set(struct counter_device *counter,
+ /* Load mode configuration to Counter Mode Register */
+ outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
+
++ mutex_unlock(&priv->lock);
++
+ return 0;
+ }
+
+@@ -1010,6 +1095,8 @@ static ssize_t quad8_count_enable_write(struct counter_device *counter,
+ if (err)
+ return err;
+
++ mutex_lock(&priv->lock);
++
+ priv->ab_enable[count->id] = ab_enable;
+
+ ior_cfg = ab_enable | priv->preset_enable[count->id] << 1;
+@@ -1017,6 +1104,8 @@ static ssize_t quad8_count_enable_write(struct counter_device *counter,
+ /* Load I/O control configuration */
+ outb(QUAD8_CTR_IOR | ior_cfg, base_offset + 1);
+
++ mutex_unlock(&priv->lock);
++
+ return len;
+ }
+
+@@ -1045,14 +1134,28 @@ static ssize_t quad8_count_preset_read(struct counter_device *counter,
+ return sprintf(buf, "%u\n", priv->preset[count->id]);
+ }
+
++static void quad8_preset_register_set(struct quad8_iio *quad8iio, int id,
++ unsigned int preset)
++{
++ const unsigned int base_offset = quad8iio->base + 2 * id;
++ int i;
++
++ quad8iio->preset[id] = preset;
++
++ /* Reset Byte Pointer */
++ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
++
++ /* Set Preset Register */
++ for (i = 0; i < 3; i++)
++ outb(preset >> (8 * i), base_offset);
++}
++
+ static ssize_t quad8_count_preset_write(struct counter_device *counter,
+ struct counter_count *count, void *private, const char *buf, size_t len)
+ {
+ struct quad8_iio *const priv = counter->priv;
+- const int base_offset = priv->base + 2 * count->id;
+ unsigned int preset;
+ int ret;
+- int i;
+
+ ret = kstrtouint(buf, 0, &preset);
+ if (ret)
+@@ -1062,14 +1165,11 @@ static ssize_t quad8_count_preset_write(struct counter_device *counter,
+ if (preset > 0xFFFFFF)
+ return -EINVAL;
+
+- priv->preset[count->id] = preset;
++ mutex_lock(&priv->lock);
+
+- /* Reset Byte Pointer */
+- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
++ quad8_preset_register_set(priv, count->id, preset);
+
+- /* Set Preset Register */
+- for (i = 0; i < 3; i++)
+- outb(preset >> (8 * i), base_offset);
++ mutex_unlock(&priv->lock);
+
+ return len;
+ }
+@@ -1077,15 +1177,20 @@ static ssize_t quad8_count_preset_write(struct counter_device *counter,
+ static ssize_t quad8_count_ceiling_read(struct counter_device *counter,
+ struct counter_count *count, void *private, char *buf)
+ {
+- const struct quad8_iio *const priv = counter->priv;
++ struct quad8_iio *const priv = counter->priv;
++
++ mutex_lock(&priv->lock);
+
+ /* Range Limit and Modulo-N count modes use preset value as ceiling */
+ switch (priv->count_mode[count->id]) {
+ case 1:
+ case 3:
+- return quad8_count_preset_read(counter, count, private, buf);
++ mutex_unlock(&priv->lock);
++ return sprintf(buf, "%u\n", priv->preset[count->id]);
+ }
+
++ mutex_unlock(&priv->lock);
++
+ /* By default 0x1FFFFFF (25 bits unsigned) is maximum count */
+ return sprintf(buf, "33554431\n");
+ }
+@@ -1094,15 +1199,29 @@ static ssize_t quad8_count_ceiling_write(struct counter_device *counter,
+ struct counter_count *count, void *private, const char *buf, size_t len)
+ {
+ struct quad8_iio *const priv = counter->priv;
++ unsigned int ceiling;
++ int ret;
++
++ ret = kstrtouint(buf, 0, &ceiling);
++ if (ret)
++ return ret;
++
++ /* Only 24-bit values are supported */
++ if (ceiling > 0xFFFFFF)
++ return -EINVAL;
++
++ mutex_lock(&priv->lock);
+
+ /* Range Limit and Modulo-N count modes use preset value as ceiling */
+ switch (priv->count_mode[count->id]) {
+ case 1:
+ case 3:
+- return quad8_count_preset_write(counter, count, private, buf,
+- len);
++ quad8_preset_register_set(priv, count->id, ceiling);
++ break;
+ }
+
++ mutex_unlock(&priv->lock);
++
+ return len;
+ }
+
+@@ -1130,6 +1249,8 @@ static ssize_t quad8_count_preset_enable_write(struct counter_device *counter,
+ /* Preset enable is active low in Input/Output Control register */
+ preset_enable = !preset_enable;
+
++ mutex_lock(&priv->lock);
++
+ priv->preset_enable[count->id] = preset_enable;
+
+ ior_cfg = priv->ab_enable[count->id] | (unsigned int)preset_enable << 1;
+@@ -1137,6 +1258,8 @@ static ssize_t quad8_count_preset_enable_write(struct counter_device *counter,
+ /* Load I/O control configuration to Input / Output Control Register */
+ outb(QUAD8_CTR_IOR | ior_cfg, base_offset);
+
++ mutex_unlock(&priv->lock);
++
+ return len;
+ }
+
+@@ -1307,6 +1430,9 @@ static int quad8_probe(struct device *dev, unsigned int id)
+ quad8iio->counter.priv = quad8iio;
+ quad8iio->base = base[id];
+
++ /* Initialize mutex */
++ mutex_init(&quad8iio->lock);
++
+ /* Reset all counters and disable interrupt function */
+ outb(QUAD8_CHAN_OP_RESET_COUNTERS, base[id] + QUAD8_REG_CHAN_OP);
+ /* Set initial configuration for all counters */
+diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
+index e937605670ac..8c2e85f884d3 100644
+--- a/drivers/crypto/chelsio/chcr_core.c
++++ b/drivers/crypto/chelsio/chcr_core.c
+@@ -125,8 +125,6 @@ static void chcr_dev_init(struct uld_ctx *u_ctx)
+ atomic_set(&dev->inflight, 0);
+ mutex_lock(&drv_data.drv_mutex);
+ list_add_tail(&u_ctx->entry, &drv_data.inact_dev);
+- if (!drv_data.last_dev)
+- drv_data.last_dev = u_ctx;
+ mutex_unlock(&drv_data.drv_mutex);
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index c8bf9cb3cebf..f184cdca938d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1953,8 +1953,24 @@ static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
+ */
+ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
+ {
+- return !!memcmp(adev->gart.ptr, adev->reset_magic,
+- AMDGPU_RESET_MAGIC_NUM);
++ if (memcmp(adev->gart.ptr, adev->reset_magic,
++ AMDGPU_RESET_MAGIC_NUM))
++ return true;
++
++ if (!adev->in_gpu_reset)
++ return false;
++
++ /*
++ * For all ASICs with baco/mode1 reset, the VRAM is
++ * always assumed to be lost.
++ */
++ switch (amdgpu_asic_reset_method(adev)) {
++ case AMD_RESET_METHOD_BACO:
++ case AMD_RESET_METHOD_MODE1:
++ return true;
++ default:
++ return false;
++ }
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
+index 006f21ef7ddf..62635e58e45e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cik.c
++++ b/drivers/gpu/drm/amd/amdgpu/cik.c
+@@ -1358,8 +1358,6 @@ static int cik_asic_reset(struct amdgpu_device *adev)
+ int r;
+
+ if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
+- if (!adev->in_suspend)
+- amdgpu_inc_vram_lost(adev);
+ r = amdgpu_dpm_baco_reset(adev);
+ } else {
+ r = cik_asic_pci_config_reset(adev);
+diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
+index 2d1bebdf1603..cc3a79029376 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nv.c
++++ b/drivers/gpu/drm/amd/amdgpu/nv.c
+@@ -351,8 +351,6 @@ static int nv_asic_reset(struct amdgpu_device *adev)
+ struct smu_context *smu = &adev->smu;
+
+ if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
+- if (!adev->in_suspend)
+- amdgpu_inc_vram_lost(adev);
+ ret = smu_baco_enter(smu);
+ if (ret)
+ return ret;
+@@ -360,8 +358,6 @@ static int nv_asic_reset(struct amdgpu_device *adev)
+ if (ret)
+ return ret;
+ } else {
+- if (!adev->in_suspend)
+- amdgpu_inc_vram_lost(adev);
+ ret = nv_asic_mode1_reset(adev);
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index d8945c31b622..132a67a041a2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -569,14 +569,10 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
+
+ switch (soc15_asic_reset_method(adev)) {
+ case AMD_RESET_METHOD_BACO:
+- if (!adev->in_suspend)
+- amdgpu_inc_vram_lost(adev);
+ return soc15_asic_baco_reset(adev);
+ case AMD_RESET_METHOD_MODE2:
+ return amdgpu_dpm_mode2_reset(adev);
+ default:
+- if (!adev->in_suspend)
+- amdgpu_inc_vram_lost(adev);
+ return soc15_asic_mode1_reset(adev);
+ }
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
+index 78b35901643b..3ce10e05d0d6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vi.c
++++ b/drivers/gpu/drm/amd/amdgpu/vi.c
+@@ -765,8 +765,6 @@ static int vi_asic_reset(struct amdgpu_device *adev)
+ int r;
+
+ if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
+- if (!adev->in_suspend)
+- amdgpu_inc_vram_lost(adev);
+ r = amdgpu_dpm_baco_reset(adev);
+ } else {
+ r = vi_asic_pci_config_reset(adev);
+diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
+index 47ac20aee06f..4c1c61aa4b82 100644
+--- a/drivers/hwmon/Kconfig
++++ b/drivers/hwmon/Kconfig
+@@ -403,7 +403,7 @@ config SENSORS_DRIVETEMP
+ hard disk drives.
+
+ This driver can also be built as a module. If so, the module
+- will be called satatemp.
++ will be called drivetemp.
+
+ config SENSORS_DS620
+ tristate "Dallas Semiconductor DS620"
+diff --git a/drivers/hwmon/drivetemp.c b/drivers/hwmon/drivetemp.c
+index 370d0c74eb01..9179460c2d9d 100644
+--- a/drivers/hwmon/drivetemp.c
++++ b/drivers/hwmon/drivetemp.c
+@@ -264,12 +264,18 @@ static int drivetemp_get_scttemp(struct drivetemp_data *st, u32 attr, long *val)
+ return err;
+ switch (attr) {
+ case hwmon_temp_input:
++ if (!temp_is_valid(buf[SCT_STATUS_TEMP]))
++ return -ENODATA;
+ *val = temp_from_sct(buf[SCT_STATUS_TEMP]);
+ break;
+ case hwmon_temp_lowest:
++ if (!temp_is_valid(buf[SCT_STATUS_TEMP_LOWEST]))
++ return -ENODATA;
+ *val = temp_from_sct(buf[SCT_STATUS_TEMP_LOWEST]);
+ break;
+ case hwmon_temp_highest:
++ if (!temp_is_valid(buf[SCT_STATUS_TEMP_HIGHEST]))
++ return -ENODATA;
+ *val = temp_from_sct(buf[SCT_STATUS_TEMP_HIGHEST]);
+ break;
+ default:
+diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
+index f2d81b0558e5..e3f1ebee7130 100644
+--- a/drivers/hwmon/jc42.c
++++ b/drivers/hwmon/jc42.c
+@@ -506,7 +506,7 @@ static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id)
+ }
+ data->config = config;
+
+- hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
++ hwmon_dev = devm_hwmon_device_register_with_info(dev, "jc42",
+ data, &jc42_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c
+index 1de23b4f3809..92d2c706c2a7 100644
+--- a/drivers/i2c/busses/i2c-altera.c
++++ b/drivers/i2c/busses/i2c-altera.c
+@@ -384,7 +384,6 @@ static int altr_i2c_probe(struct platform_device *pdev)
+ struct altr_i2c_dev *idev = NULL;
+ struct resource *res;
+ int irq, ret;
+- u32 val;
+
+ idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL);
+ if (!idev)
+@@ -411,17 +410,17 @@ static int altr_i2c_probe(struct platform_device *pdev)
+ init_completion(&idev->msg_complete);
+ spin_lock_init(&idev->lock);
+
+- val = device_property_read_u32(idev->dev, "fifo-size",
++ ret = device_property_read_u32(idev->dev, "fifo-size",
+ &idev->fifo_size);
+- if (val) {
++ if (ret) {
+ dev_err(&pdev->dev, "FIFO size set to default of %d\n",
+ ALTR_I2C_DFLT_FIFO_SZ);
+ idev->fifo_size = ALTR_I2C_DFLT_FIFO_SZ;
+ }
+
+- val = device_property_read_u32(idev->dev, "clock-frequency",
++ ret = device_property_read_u32(idev->dev, "clock-frequency",
+ &idev->bus_clk_rate);
+- if (val) {
++ if (ret) {
+ dev_err(&pdev->dev, "Default to 100kHz\n");
+ idev->bus_clk_rate = 100000; /* default clock rate */
+ }
+diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
+index b747db97f78a..e5691e330323 100644
+--- a/drivers/iio/adc/ad7793.c
++++ b/drivers/iio/adc/ad7793.c
+@@ -542,7 +542,7 @@ static const struct iio_info ad7797_info = {
+ .read_raw = &ad7793_read_raw,
+ .write_raw = &ad7793_write_raw,
+ .write_raw_get_fmt = &ad7793_write_raw_get_fmt,
+- .attrs = &ad7793_attribute_group,
++ .attrs = &ad7797_attribute_group,
+ .validate_trigger = ad_sd_validate_trigger,
+ };
+
+diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+index 9c3486a8134f..84b27b624149 100644
+--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+@@ -337,6 +337,7 @@ enum st_lsm6dsx_fifo_mode {
+ * @gain: Configured sensor sensitivity.
+ * @odr: Output data rate of the sensor [Hz].
+ * @watermark: Sensor watermark level.
++ * @decimator: Sensor decimation factor.
+ * @sip: Number of samples in a given pattern.
+ * @ts_ref: Sensor timestamp reference for hw one.
+ * @ext_info: Sensor settings if it is connected to i2c controller
+@@ -350,11 +351,13 @@ struct st_lsm6dsx_sensor {
+ u32 odr;
+
+ u16 watermark;
++ u8 decimator;
+ u8 sip;
+ s64 ts_ref;
+
+ struct {
+ const struct st_lsm6dsx_ext_dev_settings *settings;
++ u32 slv_odr;
+ u8 addr;
+ } ext_info;
+ };
+diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+index bb899345f2bb..afd00daeefb2 100644
+--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+@@ -93,6 +93,7 @@ st_lsm6dsx_get_decimator_val(struct st_lsm6dsx_sensor *sensor, u32 max_odr)
+ break;
+ }
+
++ sensor->decimator = decimator;
+ return i == max_size ? 0 : st_lsm6dsx_decimator_table[i].val;
+ }
+
+@@ -337,7 +338,7 @@ static inline int st_lsm6dsx_read_block(struct st_lsm6dsx_hw *hw, u8 addr,
+ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
+ {
+ struct st_lsm6dsx_sensor *acc_sensor, *gyro_sensor, *ext_sensor = NULL;
+- int err, acc_sip, gyro_sip, ts_sip, ext_sip, read_len, offset;
++ int err, sip, acc_sip, gyro_sip, ts_sip, ext_sip, read_len, offset;
+ u16 fifo_len, pattern_len = hw->sip * ST_LSM6DSX_SAMPLE_SIZE;
+ u16 fifo_diff_mask = hw->settings->fifo_ops.fifo_diff.mask;
+ u8 gyro_buff[ST_LSM6DSX_IIO_BUFF_SIZE];
+@@ -399,19 +400,20 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
+ acc_sip = acc_sensor->sip;
+ ts_sip = hw->ts_sip;
+ offset = 0;
++ sip = 0;
+
+ while (acc_sip > 0 || gyro_sip > 0 || ext_sip > 0) {
+- if (gyro_sip > 0) {
++ if (gyro_sip > 0 && !(sip % gyro_sensor->decimator)) {
+ memcpy(gyro_buff, &hw->buff[offset],
+ ST_LSM6DSX_SAMPLE_SIZE);
+ offset += ST_LSM6DSX_SAMPLE_SIZE;
+ }
+- if (acc_sip > 0) {
++ if (acc_sip > 0 && !(sip % acc_sensor->decimator)) {
+ memcpy(acc_buff, &hw->buff[offset],
+ ST_LSM6DSX_SAMPLE_SIZE);
+ offset += ST_LSM6DSX_SAMPLE_SIZE;
+ }
+- if (ext_sip > 0) {
++ if (ext_sip > 0 && !(sip % ext_sensor->decimator)) {
+ memcpy(ext_buff, &hw->buff[offset],
+ ST_LSM6DSX_SAMPLE_SIZE);
+ offset += ST_LSM6DSX_SAMPLE_SIZE;
+@@ -441,18 +443,25 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
+ offset += ST_LSM6DSX_SAMPLE_SIZE;
+ }
+
+- if (gyro_sip-- > 0)
++ if (gyro_sip > 0 && !(sip % gyro_sensor->decimator)) {
+ iio_push_to_buffers_with_timestamp(
+ hw->iio_devs[ST_LSM6DSX_ID_GYRO],
+ gyro_buff, gyro_sensor->ts_ref + ts);
+- if (acc_sip-- > 0)
++ gyro_sip--;
++ }
++ if (acc_sip > 0 && !(sip % acc_sensor->decimator)) {
+ iio_push_to_buffers_with_timestamp(
+ hw->iio_devs[ST_LSM6DSX_ID_ACC],
+ acc_buff, acc_sensor->ts_ref + ts);
+- if (ext_sip-- > 0)
++ acc_sip--;
++ }
++ if (ext_sip > 0 && !(sip % ext_sensor->decimator)) {
+ iio_push_to_buffers_with_timestamp(
+ hw->iio_devs[ST_LSM6DSX_ID_EXT0],
+ ext_buff, ext_sensor->ts_ref + ts);
++ ext_sip--;
++ }
++ sip++;
+ }
+ }
+
+diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
+index 95ddd19d1aa7..64ef07a30726 100644
+--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
+@@ -421,7 +421,8 @@ int st_lsm6dsx_shub_set_enable(struct st_lsm6dsx_sensor *sensor, bool enable)
+
+ settings = sensor->ext_info.settings;
+ if (enable) {
+- err = st_lsm6dsx_shub_set_odr(sensor, sensor->odr);
++ err = st_lsm6dsx_shub_set_odr(sensor,
++ sensor->ext_info.slv_odr);
+ if (err < 0)
+ return err;
+ } else {
+@@ -459,7 +460,7 @@ st_lsm6dsx_shub_read_oneshot(struct st_lsm6dsx_sensor *sensor,
+ if (err < 0)
+ return err;
+
+- delay = 1000000000 / sensor->odr;
++ delay = 1000000000 / sensor->ext_info.slv_odr;
+ usleep_range(delay, 2 * delay);
+
+ len = min_t(int, sizeof(data), ch->scan_type.realbits >> 3);
+@@ -500,8 +501,8 @@ st_lsm6dsx_shub_read_raw(struct iio_dev *iio_dev,
+ iio_device_release_direct_mode(iio_dev);
+ break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+- *val = sensor->odr / 1000;
+- *val2 = (sensor->odr % 1000) * 1000;
++ *val = sensor->ext_info.slv_odr / 1000;
++ *val2 = (sensor->ext_info.slv_odr % 1000) * 1000;
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ case IIO_CHAN_INFO_SCALE:
+@@ -535,8 +536,20 @@ st_lsm6dsx_shub_write_raw(struct iio_dev *iio_dev,
+
+ val = val * 1000 + val2 / 1000;
+ err = st_lsm6dsx_shub_get_odr_val(sensor, val, &data);
+- if (!err)
+- sensor->odr = val;
++ if (!err) {
++ struct st_lsm6dsx_hw *hw = sensor->hw;
++ struct st_lsm6dsx_sensor *ref_sensor;
++ u8 odr_val;
++ int odr;
++
++ ref_sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_ACC]);
++ odr = st_lsm6dsx_check_odr(ref_sensor, val, &odr_val);
++ if (odr < 0)
++ return odr;
++
++ sensor->ext_info.slv_odr = val;
++ sensor->odr = odr;
++ }
+ break;
+ }
+ default:
+@@ -613,6 +626,7 @@ st_lsm6dsx_shub_alloc_iiodev(struct st_lsm6dsx_hw *hw,
+ const struct st_lsm6dsx_ext_dev_settings *info,
+ u8 i2c_addr, const char *name)
+ {
++ enum st_lsm6dsx_sensor_id ref_id = ST_LSM6DSX_ID_ACC;
+ struct iio_chan_spec *ext_channels;
+ struct st_lsm6dsx_sensor *sensor;
+ struct iio_dev *iio_dev;
+@@ -628,7 +642,8 @@ st_lsm6dsx_shub_alloc_iiodev(struct st_lsm6dsx_hw *hw,
+ sensor = iio_priv(iio_dev);
+ sensor->id = id;
+ sensor->hw = hw;
+- sensor->odr = info->odr_table.odr_avl[0].milli_hz;
++ sensor->odr = hw->settings->odr_table[ref_id].odr_avl[0].milli_hz;
++ sensor->ext_info.slv_odr = info->odr_table.odr_avl[0].milli_hz;
+ sensor->gain = info->fs_table.fs_avl[0].gain;
+ sensor->ext_info.settings = info;
+ sensor->ext_info.addr = i2c_addr;
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index 7c8f65c9c32d..381513e05302 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -14,6 +14,7 @@
+ #include <linux/dma-iommu.h>
+ #include <linux/efi.h>
+ #include <linux/interrupt.h>
++#include <linux/iopoll.h>
+ #include <linux/irqdomain.h>
+ #include <linux/list.h>
+ #include <linux/log2.h>
+@@ -3516,6 +3517,20 @@ out:
+ return IRQ_SET_MASK_OK_DONE;
+ }
+
++static void its_wait_vpt_parse_complete(void)
++{
++ void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
++ u64 val;
++
++ if (!gic_rdists->has_vpend_valid_dirty)
++ return;
++
++ WARN_ON_ONCE(readq_relaxed_poll_timeout(vlpi_base + GICR_VPENDBASER,
++ val,
++ !(val & GICR_VPENDBASER_Dirty),
++ 10, 500));
++}
++
+ static void its_vpe_schedule(struct its_vpe *vpe)
+ {
+ void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+@@ -3546,6 +3561,8 @@ static void its_vpe_schedule(struct its_vpe *vpe)
+ val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
+ val |= GICR_VPENDBASER_Valid;
+ gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
++
++ its_wait_vpt_parse_complete();
+ }
+
+ static void its_vpe_deschedule(struct its_vpe *vpe)
+@@ -3752,6 +3769,8 @@ static void its_vpe_4_1_schedule(struct its_vpe *vpe,
+ val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
+
+ gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
++
++ its_wait_vpt_parse_complete();
+ }
+
+ static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
+diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
+index 1eec9d4649d5..71a84f9c5696 100644
+--- a/drivers/irqchip/irq-gic-v3.c
++++ b/drivers/irqchip/irq-gic-v3.c
+@@ -866,6 +866,7 @@ static int __gic_update_rdist_properties(struct redist_region *region,
+ gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
+ gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
+ gic_data.rdists.has_rvpeid);
++ gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY);
+
+ /* Detect non-sensical configurations */
+ if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
+@@ -886,10 +887,11 @@ static void gic_update_rdist_properties(void)
+ if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
+ gic_data.ppi_nr = 0;
+ pr_info("%d PPIs implemented\n", gic_data.ppi_nr);
+- pr_info("%sVLPI support, %sdirect LPI support, %sRVPEID support\n",
+- !gic_data.rdists.has_vlpis ? "no " : "",
+- !gic_data.rdists.has_direct_lpi ? "no " : "",
+- !gic_data.rdists.has_rvpeid ? "no " : "");
++ if (gic_data.rdists.has_vlpis)
++ pr_info("GICv4 features: %s%s%s\n",
++ gic_data.rdists.has_direct_lpi ? "DirectLPI " : "",
++ gic_data.rdists.has_rvpeid ? "RVPEID " : "",
++ gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : "");
+ }
+
+ /* Check whether it's single security state view */
+@@ -1614,6 +1616,7 @@ static int __init gic_init_bases(void __iomem *dist_base,
+ gic_data.rdists.has_rvpeid = true;
+ gic_data.rdists.has_vlpis = true;
+ gic_data.rdists.has_direct_lpi = true;
++ gic_data.rdists.has_vpend_valid_dirty = true;
+
+ if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
+ err = -ENOMEM;
+diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
+index ccc7f823911b..bc7aebcc96e9 100644
+--- a/drivers/irqchip/irq-meson-gpio.c
++++ b/drivers/irqchip/irq-meson-gpio.c
+@@ -144,12 +144,17 @@ struct meson_gpio_irq_controller {
+ static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl,
+ unsigned int reg, u32 mask, u32 val)
+ {
++ unsigned long flags;
+ u32 tmp;
+
++ spin_lock_irqsave(&ctl->lock, flags);
++
+ tmp = readl_relaxed(ctl->base + reg);
+ tmp &= ~mask;
+ tmp |= val;
+ writel_relaxed(tmp, ctl->base + reg);
++
++ spin_unlock_irqrestore(&ctl->lock, flags);
+ }
+
+ static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl)
+@@ -196,14 +201,15 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
+ unsigned long hwirq,
+ u32 **channel_hwirq)
+ {
++ unsigned long flags;
+ unsigned int idx;
+
+- spin_lock(&ctl->lock);
++ spin_lock_irqsave(&ctl->lock, flags);
+
+ /* Find a free channel */
+ idx = find_first_zero_bit(ctl->channel_map, NUM_CHANNEL);
+ if (idx >= NUM_CHANNEL) {
+- spin_unlock(&ctl->lock);
++ spin_unlock_irqrestore(&ctl->lock, flags);
+ pr_err("No channel available\n");
+ return -ENOSPC;
+ }
+@@ -211,6 +217,8 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
+ /* Mark the channel as used */
+ set_bit(idx, ctl->channel_map);
+
++ spin_unlock_irqrestore(&ctl->lock, flags);
++
+ /*
+ * Setup the mux of the channel to route the signal of the pad
+ * to the appropriate input of the GIC
+@@ -225,8 +233,6 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
+ */
+ *channel_hwirq = &(ctl->channel_irqs[idx]);
+
+- spin_unlock(&ctl->lock);
+-
+ pr_debug("hwirq %lu assigned to channel %d - irq %u\n",
+ hwirq, idx, **channel_hwirq);
+
+@@ -287,13 +293,9 @@ static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl,
+ val |= REG_EDGE_POL_LOW(params, idx);
+ }
+
+- spin_lock(&ctl->lock);
+-
+ meson_gpio_irq_update_bits(ctl, REG_EDGE_POL,
+ REG_EDGE_POL_MASK(params, idx), val);
+
+- spin_unlock(&ctl->lock);
+-
+ return 0;
+ }
+
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+index 844fdcf55118..2d4ed751333f 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+@@ -3748,7 +3748,7 @@ int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
+ FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
+ &param, &val);
+- if (ret < 0)
++ if (ret)
+ return ret;
+ *phy_fw_ver = val;
+ return 0;
+diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
+index f79e57f735b3..d89568f810bc 100644
+--- a/drivers/net/ethernet/freescale/fec.h
++++ b/drivers/net/ethernet/freescale/fec.h
+@@ -488,6 +488,12 @@ struct fec_enet_priv_rx_q {
+ struct sk_buff *rx_skbuff[RX_RING_SIZE];
+ };
+
++struct fec_stop_mode_gpr {
++ struct regmap *gpr;
++ u8 reg;
++ u8 bit;
++};
++
+ /* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
+ * tx_bd_base always point to the base of the buffer descriptors. The
+ * cur_rx and cur_tx point to the currently available buffer.
+@@ -562,6 +568,7 @@ struct fec_enet_private {
+ int hwts_tx_en;
+ struct delayed_work time_keep;
+ struct regulator *reg_phy;
++ struct fec_stop_mode_gpr stop_gpr;
+
+ unsigned int tx_align;
+ unsigned int rx_align;
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 23c5fef2f1ad..869efbb6c4d0 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -62,6 +62,8 @@
+ #include <linux/if_vlan.h>
+ #include <linux/pinctrl/consumer.h>
+ #include <linux/prefetch.h>
++#include <linux/mfd/syscon.h>
++#include <linux/regmap.h>
+ #include <soc/imx/cpuidle.h>
+
+ #include <asm/cacheflush.h>
+@@ -84,6 +86,56 @@ static void fec_enet_itr_coal_init(struct net_device *ndev);
+ #define FEC_ENET_OPD_V 0xFFF0
+ #define FEC_MDIO_PM_TIMEOUT 100 /* ms */
+
++struct fec_devinfo {
++ u32 quirks;
++ u8 stop_gpr_reg;
++ u8 stop_gpr_bit;
++};
++
++static const struct fec_devinfo fec_imx25_info = {
++ .quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
++ FEC_QUIRK_HAS_FRREG,
++};
++
++static const struct fec_devinfo fec_imx27_info = {
++ .quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
++};
++
++static const struct fec_devinfo fec_imx28_info = {
++ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
++ FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
++ FEC_QUIRK_HAS_FRREG,
++};
++
++static const struct fec_devinfo fec_imx6q_info = {
++ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
++ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
++ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
++ FEC_QUIRK_HAS_RACC,
++ .stop_gpr_reg = 0x34,
++ .stop_gpr_bit = 27,
++};
++
++static const struct fec_devinfo fec_mvf600_info = {
++ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
++};
++
++static const struct fec_devinfo fec_imx6x_info = {
++ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
++ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
++ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
++ FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
++ FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
++};
++
++static const struct fec_devinfo fec_imx6ul_info = {
++ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
++ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
++ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
++ FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
++ FEC_QUIRK_HAS_COALESCE,
++};
++
+ static struct platform_device_id fec_devtype[] = {
+ {
+ /* keep it for coldfire */
+@@ -91,39 +143,25 @@ static struct platform_device_id fec_devtype[] = {
+ .driver_data = 0,
+ }, {
+ .name = "imx25-fec",
+- .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
+- FEC_QUIRK_HAS_FRREG,
++ .driver_data = (kernel_ulong_t)&fec_imx25_info,
+ }, {
+ .name = "imx27-fec",
+- .driver_data = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
++ .driver_data = (kernel_ulong_t)&fec_imx27_info,
+ }, {
+ .name = "imx28-fec",
+- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
+- FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
+- FEC_QUIRK_HAS_FRREG,
++ .driver_data = (kernel_ulong_t)&fec_imx28_info,
+ }, {
+ .name = "imx6q-fec",
+- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+- FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+- FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
+- FEC_QUIRK_HAS_RACC,
++ .driver_data = (kernel_ulong_t)&fec_imx6q_info,
+ }, {
+ .name = "mvf600-fec",
+- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
++ .driver_data = (kernel_ulong_t)&fec_mvf600_info,
+ }, {
+ .name = "imx6sx-fec",
+- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+- FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+- FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
+- FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
+- FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
++ .driver_data = (kernel_ulong_t)&fec_imx6x_info,
+ }, {
+ .name = "imx6ul-fec",
+- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+- FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+- FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
+- FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
+- FEC_QUIRK_HAS_COALESCE,
++ .driver_data = (kernel_ulong_t)&fec_imx6ul_info,
+ }, {
+ /* sentinel */
+ }
+@@ -1092,11 +1130,28 @@ fec_restart(struct net_device *ndev)
+
+ }
+
++static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
++{
++ struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
++ struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr;
++
++ if (stop_gpr->gpr) {
++ if (enabled)
++ regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
++ BIT(stop_gpr->bit),
++ BIT(stop_gpr->bit));
++ else
++ regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
++ BIT(stop_gpr->bit), 0);
++ } else if (pdata && pdata->sleep_mode_enable) {
++ pdata->sleep_mode_enable(enabled);
++ }
++}
++
+ static void
+ fec_stop(struct net_device *ndev)
+ {
+ struct fec_enet_private *fep = netdev_priv(ndev);
+- struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
+ u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
+ u32 val;
+
+@@ -1125,9 +1180,7 @@ fec_stop(struct net_device *ndev)
+ val = readl(fep->hwp + FEC_ECNTRL);
+ val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
+ writel(val, fep->hwp + FEC_ECNTRL);
+-
+- if (pdata && pdata->sleep_mode_enable)
+- pdata->sleep_mode_enable(true);
++ fec_enet_stop_mode(fep, true);
+ }
+ writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+
+@@ -3397,6 +3450,37 @@ static int fec_enet_get_irq_cnt(struct platform_device *pdev)
+ return irq_cnt;
+ }
+
++static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
++ struct fec_devinfo *dev_info,
++ struct device_node *np)
++{
++ struct device_node *gpr_np;
++ int ret = 0;
++
++ if (!dev_info)
++ return 0;
++
++ gpr_np = of_parse_phandle(np, "gpr", 0);
++ if (!gpr_np)
++ return 0;
++
++ fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);
++ if (IS_ERR(fep->stop_gpr.gpr)) {
++ dev_err(&fep->pdev->dev, "could not find gpr regmap\n");
++ ret = PTR_ERR(fep->stop_gpr.gpr);
++ fep->stop_gpr.gpr = NULL;
++ goto out;
++ }
++
++ fep->stop_gpr.reg = dev_info->stop_gpr_reg;
++ fep->stop_gpr.bit = dev_info->stop_gpr_bit;
++
++out:
++ of_node_put(gpr_np);
++
++ return ret;
++}
++
+ static int
+ fec_probe(struct platform_device *pdev)
+ {
+@@ -3412,6 +3496,7 @@ fec_probe(struct platform_device *pdev)
+ int num_rx_qs;
+ char irq_name[8];
+ int irq_cnt;
++ struct fec_devinfo *dev_info;
+
+ fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
+
+@@ -3429,7 +3514,9 @@ fec_probe(struct platform_device *pdev)
+ of_id = of_match_device(fec_dt_ids, &pdev->dev);
+ if (of_id)
+ pdev->id_entry = of_id->data;
+- fep->quirks = pdev->id_entry->driver_data;
++ dev_info = (struct fec_devinfo *)pdev->id_entry->driver_data;
++ if (dev_info)
++ fep->quirks = dev_info->quirks;
+
+ fep->netdev = ndev;
+ fep->num_rx_queues = num_rx_qs;
+@@ -3463,6 +3550,10 @@ fec_probe(struct platform_device *pdev)
+ if (of_get_property(np, "fsl,magic-packet", NULL))
+ fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
+
++ ret = fec_enet_init_stop_mode(fep, dev_info, np);
++ if (ret)
++ goto failed_stop_mode;
++
+ phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (!phy_node && of_phy_is_fixed_link(np)) {
+ ret = of_phy_register_fixed_link(np);
+@@ -3631,6 +3722,7 @@ failed_clk:
+ if (of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
+ of_node_put(phy_node);
++failed_stop_mode:
+ failed_phy:
+ dev_id--;
+ failed_ioremap:
+@@ -3708,7 +3800,6 @@ static int __maybe_unused fec_resume(struct device *dev)
+ {
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+- struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
+ int ret;
+ int val;
+
+@@ -3726,8 +3817,8 @@ static int __maybe_unused fec_resume(struct device *dev)
+ goto failed_clk;
+ }
+ if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
+- if (pdata && pdata->sleep_mode_enable)
+- pdata->sleep_mode_enable(false);
++ fec_enet_stop_mode(fep, false);
++
+ val = readl(fep->hwp + FEC_ECNTRL);
+ val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
+ writel(val, fep->hwp + FEC_ECNTRL);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+index 94d7b69a95c7..eb2e57ff08a6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+@@ -935,7 +935,7 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev)
+ return NULL;
+ }
+
+- tracer = kzalloc(sizeof(*tracer), GFP_KERNEL);
++ tracer = kvzalloc(sizeof(*tracer), GFP_KERNEL);
+ if (!tracer)
+ return ERR_PTR(-ENOMEM);
+
+@@ -982,7 +982,7 @@ destroy_workqueue:
+ tracer->dev = NULL;
+ destroy_workqueue(tracer->work_queue);
+ free_tracer:
+- kfree(tracer);
++ kvfree(tracer);
+ return ERR_PTR(err);
+ }
+
+@@ -1061,7 +1061,7 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer)
+ mlx5_fw_tracer_destroy_log_buf(tracer);
+ flush_workqueue(tracer->work_queue);
+ destroy_workqueue(tracer->work_queue);
+- kfree(tracer);
++ kvfree(tracer);
+ }
+
+ static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void *data)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+index ddd2409fc8be..5a5e6a21c6e1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+@@ -367,6 +367,7 @@ enum {
+ MLX5E_SQ_STATE_AM,
+ MLX5E_SQ_STATE_TLS,
+ MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
++ MLX5E_SQ_STATE_PENDING_XSK_TX,
+ };
+
+ struct mlx5e_sq_wqe_info {
+@@ -950,7 +951,7 @@ void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
+ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
+-void mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
++int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
+ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
+ void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
+ void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+index fe2d596cb361..3bcdb5b2fc20 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+@@ -33,6 +33,9 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &c->xskicosq.state)))
+ return 0;
+
++ if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->xskicosq.state))
++ return 0;
++
+ spin_lock(&c->xskicosq_lock);
+ mlx5e_trigger_irq(&c->xskicosq);
+ spin_unlock(&c->xskicosq_lock);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 265073996432..d02db5aebac4 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -3568,7 +3568,12 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ struct mlx5e_vport_stats *vstats = &priv->stats.vport;
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+
+- if (!mlx5e_monitor_counter_supported(priv)) {
++ /* In switchdev mode, monitor counters doesn't monitor
++ * rx/tx stats of 802_3. The update stats mechanism
++ * should keep the 802_3 layout counters updated
++ */
++ if (!mlx5e_monitor_counter_supported(priv) ||
++ mlx5e_is_uplink_rep(priv)) {
+ /* update HW stats in background for next time */
+ mlx5e_queue_update_stats(priv);
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index 312d4692425b..a9a96a630e4d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -587,7 +587,7 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
+ return !!err;
+ }
+
+-void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
++int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
+ {
+ struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
+ struct mlx5_cqe64 *cqe;
+@@ -595,11 +595,11 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
+ int i;
+
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
+- return;
++ return 0;
+
+ cqe = mlx5_cqwq_get_cqe(&cq->wq);
+ if (likely(!cqe))
+- return;
++ return 0;
+
+ /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+ * otherwise a cq overrun may occur
+@@ -648,6 +648,8 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
+ sq->cc = sqcc;
+
+ mlx5_cqwq_update_db_record(&cq->wq);
++
++ return i;
+ }
+
+ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+index 800d34ed8a96..76efa9579215 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+@@ -145,7 +145,11 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
+
+ busy |= rq->post_wqes(rq);
+ if (xsk_open) {
+- mlx5e_poll_ico_cq(&c->xskicosq.cq);
++ if (mlx5e_poll_ico_cq(&c->xskicosq.cq))
++ /* Don't clear the flag if nothing was polled to prevent
++ * queueing more WQEs and overflowing XSKICOSQ.
++ */
++ clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->xskicosq.state);
+ busy |= mlx5e_poll_xdpsq_cq(&xsksq->cq);
+ busy_xsk |= mlx5e_napi_xsk_post(xsksq, xskrq);
+ }
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+index 03bdd2e26329..38a65b984e47 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+@@ -4691,26 +4691,20 @@ static void qed_chain_free_single(struct qed_dev *cdev,
+
+ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
+ {
+- void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
++ struct addr_tbl_entry *pp_addr_tbl = p_chain->pbl.pp_addr_tbl;
+ u32 page_cnt = p_chain->page_cnt, i, pbl_size;
+- u8 *p_pbl_virt = p_chain->pbl_sp.p_virt_table;
+
+- if (!pp_virt_addr_tbl)
++ if (!pp_addr_tbl)
+ return;
+
+- if (!p_pbl_virt)
+- goto out;
+-
+ for (i = 0; i < page_cnt; i++) {
+- if (!pp_virt_addr_tbl[i])
++ if (!pp_addr_tbl[i].virt_addr || !pp_addr_tbl[i].dma_map)
+ break;
+
+ dma_free_coherent(&cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+- pp_virt_addr_tbl[i],
+- *(dma_addr_t *)p_pbl_virt);
+-
+- p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
++ pp_addr_tbl[i].virt_addr,
++ pp_addr_tbl[i].dma_map);
+ }
+
+ pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
+@@ -4720,9 +4714,9 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
+ pbl_size,
+ p_chain->pbl_sp.p_virt_table,
+ p_chain->pbl_sp.p_phys_table);
+-out:
+- vfree(p_chain->pbl.pp_virt_addr_tbl);
+- p_chain->pbl.pp_virt_addr_tbl = NULL;
++
++ vfree(p_chain->pbl.pp_addr_tbl);
++ p_chain->pbl.pp_addr_tbl = NULL;
+ }
+
+ void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain)
+@@ -4823,19 +4817,19 @@ qed_chain_alloc_pbl(struct qed_dev *cdev,
+ {
+ u32 page_cnt = p_chain->page_cnt, size, i;
+ dma_addr_t p_phys = 0, p_pbl_phys = 0;
+- void **pp_virt_addr_tbl = NULL;
++ struct addr_tbl_entry *pp_addr_tbl;
+ u8 *p_pbl_virt = NULL;
+ void *p_virt = NULL;
+
+- size = page_cnt * sizeof(*pp_virt_addr_tbl);
+- pp_virt_addr_tbl = vzalloc(size);
+- if (!pp_virt_addr_tbl)
++ size = page_cnt * sizeof(*pp_addr_tbl);
++ pp_addr_tbl = vzalloc(size);
++ if (!pp_addr_tbl)
+ return -ENOMEM;
+
+ /* The allocation of the PBL table is done with its full size, since it
+ * is expected to be successive.
+ * qed_chain_init_pbl_mem() is called even in a case of an allocation
+- * failure, since pp_virt_addr_tbl was previously allocated, and it
++ * failure, since tbl was previously allocated, and it
+ * should be saved to allow its freeing during the error flow.
+ */
+ size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
+@@ -4849,8 +4843,7 @@ qed_chain_alloc_pbl(struct qed_dev *cdev,
+ p_chain->b_external_pbl = true;
+ }
+
+- qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
+- pp_virt_addr_tbl);
++ qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, pp_addr_tbl);
+ if (!p_pbl_virt)
+ return -ENOMEM;
+
+@@ -4869,7 +4862,8 @@ qed_chain_alloc_pbl(struct qed_dev *cdev,
+ /* Fill the PBL table with the physical address of the page */
+ *(dma_addr_t *)p_pbl_virt = p_phys;
+ /* Keep the virtual address of the page */
+- p_chain->pbl.pp_virt_addr_tbl[i] = p_virt;
++ p_chain->pbl.pp_addr_tbl[i].virt_addr = p_virt;
++ p_chain->pbl.pp_addr_tbl[i].dma_map = p_phys;
+
+ p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
+ }
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
+index 2c189c637cca..96356e897c80 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
+@@ -1087,9 +1087,6 @@ static void qed_update_pf_params(struct qed_dev *cdev,
+ #define QED_PERIODIC_DB_REC_INTERVAL_MS 100
+ #define QED_PERIODIC_DB_REC_INTERVAL \
+ msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)
+-#define QED_PERIODIC_DB_REC_WAIT_COUNT 10
+-#define QED_PERIODIC_DB_REC_WAIT_INTERVAL \
+- (QED_PERIODIC_DB_REC_INTERVAL_MS / QED_PERIODIC_DB_REC_WAIT_COUNT)
+
+ static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
+ enum qed_slowpath_wq_flag wq_flag,
+@@ -1123,7 +1120,7 @@ void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn)
+
+ static void qed_slowpath_wq_stop(struct qed_dev *cdev)
+ {
+- int i, sleep_count = QED_PERIODIC_DB_REC_WAIT_COUNT;
++ int i;
+
+ if (IS_VF(cdev))
+ return;
+@@ -1135,13 +1132,7 @@ static void qed_slowpath_wq_stop(struct qed_dev *cdev)
+ /* Stop queuing new delayed works */
+ cdev->hwfns[i].slowpath_wq_active = false;
+
+- /* Wait until the last periodic doorbell recovery is executed */
+- while (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
+- &cdev->hwfns[i].slowpath_task_flags) &&
+- sleep_count--)
+- msleep(QED_PERIODIC_DB_REC_WAIT_INTERVAL);
+-
+- flush_workqueue(cdev->hwfns[i].slowpath_wq);
++ cancel_delayed_work(&cdev->hwfns[i].slowpath_task);
+ destroy_workqueue(cdev->hwfns[i].slowpath_wq);
+ }
+ }
+diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
+index b0d76bc19673..1799ff9a45d9 100644
+--- a/drivers/net/ethernet/sfc/efx_common.c
++++ b/drivers/net/ethernet/sfc/efx_common.c
+@@ -200,11 +200,11 @@ void efx_link_status_changed(struct efx_nic *efx)
+ unsigned int efx_xdp_max_mtu(struct efx_nic *efx)
+ {
+ /* The maximum MTU that we can fit in a single page, allowing for
+- * framing, overhead and XDP headroom.
++ * framing, overhead and XDP headroom + tailroom.
+ */
+ int overhead = EFX_MAX_FRAME_LEN(0) + sizeof(struct efx_rx_page_state) +
+ efx->rx_prefix_size + efx->type->rx_buffer_padding +
+- efx->rx_ip_align + XDP_PACKET_HEADROOM;
++ efx->rx_ip_align + EFX_XDP_HEADROOM + EFX_XDP_TAILROOM;
+
+ return PAGE_SIZE - overhead;
+ }
+@@ -302,8 +302,9 @@ static void efx_start_datapath(struct efx_nic *efx)
+ efx->rx_dma_len = (efx->rx_prefix_size +
+ EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
+ efx->type->rx_buffer_padding);
+- rx_buf_len = (sizeof(struct efx_rx_page_state) + XDP_PACKET_HEADROOM +
+- efx->rx_ip_align + efx->rx_dma_len);
++ rx_buf_len = (sizeof(struct efx_rx_page_state) + EFX_XDP_HEADROOM +
++ efx->rx_ip_align + efx->rx_dma_len + EFX_XDP_TAILROOM);
++
+ if (rx_buf_len <= PAGE_SIZE) {
+ efx->rx_scatter = efx->type->always_rx_scatter;
+ efx->rx_buffer_order = 0;
+diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
+index 8164f0edcbf0..c8dcba482d89 100644
+--- a/drivers/net/ethernet/sfc/net_driver.h
++++ b/drivers/net/ethernet/sfc/net_driver.h
+@@ -91,6 +91,12 @@
+ #define EFX_RX_BUF_ALIGNMENT 4
+ #endif
+
++/* Non-standard XDP_PACKET_HEADROOM and tailroom to satisfy XDP_REDIRECT and
++ * still fit two standard MTU size packets into a single 4K page.
++ */
++#define EFX_XDP_HEADROOM 128
++#define EFX_XDP_TAILROOM SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
++
+ /* Forward declare Precision Time Protocol (PTP) support structure. */
+ struct efx_ptp_data;
+ struct hwtstamp_config;
+diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
+index a2042f16babc..260352d97d9d 100644
+--- a/drivers/net/ethernet/sfc/rx.c
++++ b/drivers/net/ethernet/sfc/rx.c
+@@ -302,7 +302,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
+ efx->rx_prefix_size);
+
+ xdp.data = *ehp;
+- xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
++ xdp.data_hard_start = xdp.data - EFX_XDP_HEADROOM;
+
+ /* No support yet for XDP metadata */
+ xdp_set_data_meta_invalid(&xdp);
+diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
+index ee8beb87bdc1..e10c23833515 100644
+--- a/drivers/net/ethernet/sfc/rx_common.c
++++ b/drivers/net/ethernet/sfc/rx_common.c
+@@ -412,10 +412,10 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
+ index = rx_queue->added_count & rx_queue->ptr_mask;
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
+- XDP_PACKET_HEADROOM;
++ EFX_XDP_HEADROOM;
+ rx_buf->page = page;
+ rx_buf->page_offset = page_offset + efx->rx_ip_align +
+- XDP_PACKET_HEADROOM;
++ EFX_XDP_HEADROOM;
+ rx_buf->len = efx->rx_dma_len;
+ rx_buf->flags = 0;
+ ++rx_queue->added_count;
+@@ -433,7 +433,7 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
+ void efx_rx_config_page_split(struct efx_nic *efx)
+ {
+ efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
+- XDP_PACKET_HEADROOM,
++ EFX_XDP_HEADROOM + EFX_XDP_TAILROOM,
+ EFX_RX_BUF_ALIGNMENT);
+ efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
+ ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+index e0212d2fc2a1..fa32cd5b418e 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+@@ -241,6 +241,8 @@ static int socfpga_set_phy_mode_common(int phymode, u32 *val)
+ switch (phymode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
++ case PHY_INTERFACE_MODE_RGMII_RXID:
++ case PHY_INTERFACE_MODE_RGMII_TXID:
+ *val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII;
+ break;
+ case PHY_INTERFACE_MODE_MII:
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+index 23627c953a5e..436f501be937 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+@@ -729,9 +729,18 @@ static int brcmf_net_mon_stop(struct net_device *ndev)
+ return err;
+ }
+
++static netdev_tx_t brcmf_net_mon_start_xmit(struct sk_buff *skb,
++ struct net_device *ndev)
++{
++ dev_kfree_skb_any(skb);
++
++ return NETDEV_TX_OK;
++}
++
+ static const struct net_device_ops brcmf_netdev_ops_mon = {
+ .ndo_open = brcmf_net_mon_open,
+ .ndo_stop = brcmf_net_mon_stop,
++ .ndo_start_xmit = brcmf_net_mon_start_xmit,
+ };
+
+ int brcmf_net_mon_attach(struct brcmf_if *ifp)
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+index 0481796f75bc..c24350222133 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+@@ -1467,7 +1467,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
+ kmemdup(pieces->dbg_conf_tlv[i],
+ pieces->dbg_conf_tlv_len[i],
+ GFP_KERNEL);
+- if (!pieces->dbg_conf_tlv[i])
++ if (!drv->fw.dbg.conf_tlv[i])
+ goto out_free_fw;
+ }
+ }
+diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
+index 1fbc14c149ec..fbaad23e8eb1 100644
+--- a/drivers/net/wireless/realtek/rtw88/pci.c
++++ b/drivers/net/wireless/realtek/rtw88/pci.c
+@@ -1287,22 +1287,17 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
+ rtw_pci_link_cfg(rtwdev);
+ }
+
+-#ifdef CONFIG_PM
+-static int rtw_pci_suspend(struct device *dev)
++static int __maybe_unused rtw_pci_suspend(struct device *dev)
+ {
+ return 0;
+ }
+
+-static int rtw_pci_resume(struct device *dev)
++static int __maybe_unused rtw_pci_resume(struct device *dev)
+ {
+ return 0;
+ }
+
+ static SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume);
+-#define RTW_PM_OPS (&rtw_pm_ops)
+-#else
+-#define RTW_PM_OPS NULL
+-#endif
+
+ static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
+ {
+@@ -1530,7 +1525,7 @@ static struct pci_driver rtw_pci_driver = {
+ .id_table = rtw_pci_id_table,
+ .probe = rtw_pci_probe,
+ .remove = rtw_pci_remove,
+- .driver.pm = RTW_PM_OPS,
++ .driver.pm = &rtw_pm_ops,
+ };
+ module_pci_driver(rtw_pci_driver);
+
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index b7347bc6a24d..ca9ed5774eb1 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -4465,6 +4465,29 @@ static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+ }
+
++/*
++ * Many Zhaoxin Root Ports and Switch Downstream Ports have no ACS capability.
++ * But the implementation could block peer-to-peer transactions between them
++ * and provide ACS-like functionality.
++ */
++static int pci_quirk_zhaoxin_pcie_ports_acs(struct pci_dev *dev, u16 acs_flags)
++{
++ if (!pci_is_pcie(dev) ||
++ ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) &&
++ (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)))
++ return -ENOTTY;
++
++ switch (dev->device) {
++ case 0x0710 ... 0x071e:
++ case 0x0721:
++ case 0x0723 ... 0x0732:
++ return pci_acs_ctrl_enabled(acs_flags,
++ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
++ }
++
++ return false;
++}
++
+ /*
+ * Many Intel PCH Root Ports do provide ACS-like features to disable peer
+ * transactions and validate bus numbers in requests, but do not provide an
+@@ -4767,6 +4790,12 @@ static const struct pci_dev_acs_enabled {
+ { PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
+ /* Amazon Annapurna Labs */
+ { PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
++ /* Zhaoxin multi-function devices */
++ { PCI_VENDOR_ID_ZHAOXIN, 0x3038, pci_quirk_mf_endpoint_acs },
++ { PCI_VENDOR_ID_ZHAOXIN, 0x3104, pci_quirk_mf_endpoint_acs },
++ { PCI_VENDOR_ID_ZHAOXIN, 0x9083, pci_quirk_mf_endpoint_acs },
++ /* Zhaoxin Root/Downstream Ports */
++ { PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs },
+ { 0 }
+ };
+
+@@ -5527,3 +5556,21 @@ out_disable:
+ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, 0x13b1,
+ PCI_CLASS_DISPLAY_VGA, 8,
+ quirk_reset_lenovo_thinkpad_p50_nvgpu);
++
++/*
++ * Device [1b21:2142]
++ * When in D0, PME# doesn't get asserted when plugging USB 3.0 device.
++ */
++static void pci_fixup_no_d0_pme(struct pci_dev *dev)
++{
++ pci_info(dev, "PME# does not work under D0, disabling it\n");
++ dev->pme_support &= ~(PCI_PM_CAP_PME_D0 >> PCI_PM_CAP_PME_SHIFT);
++}
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x2142, pci_fixup_no_d0_pme);
++
++static void apex_pci_fixup_class(struct pci_dev *pdev)
++{
++ pdev->class = (PCI_CLASS_SYSTEM_OTHER << 8) | pdev->class;
++}
++DECLARE_PCI_FIXUP_CLASS_HEADER(0x1ac1, 0x089a,
++ PCI_CLASS_NOT_DEFINED, 8, apex_pci_fixup_class);
+diff --git a/drivers/remoteproc/mtk_common.h b/drivers/remoteproc/mtk_common.h
+index deb20096146a..0066c83636d0 100644
+--- a/drivers/remoteproc/mtk_common.h
++++ b/drivers/remoteproc/mtk_common.h
+@@ -68,7 +68,7 @@ struct mtk_scp {
+ wait_queue_head_t ack_wq;
+
+ void __iomem *cpu_addr;
+- phys_addr_t phys_addr;
++ dma_addr_t dma_addr;
+ size_t dram_size;
+
+ struct rproc_subdev *rpmsg_subdev;
+diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
+index 7ccdf64ff3ea..a6327617868e 100644
+--- a/drivers/remoteproc/mtk_scp.c
++++ b/drivers/remoteproc/mtk_scp.c
+@@ -330,7 +330,7 @@ static void *scp_da_to_va(struct rproc *rproc, u64 da, int len)
+ if (offset >= 0 && (offset + len) < scp->sram_size)
+ return (void __force *)scp->sram_base + offset;
+ } else {
+- offset = da - scp->phys_addr;
++ offset = da - scp->dma_addr;
+ if (offset >= 0 && (offset + len) < scp->dram_size)
+ return (void __force *)scp->cpu_addr + offset;
+ }
+@@ -451,7 +451,7 @@ static int scp_map_memory_region(struct mtk_scp *scp)
+ /* Reserved SCP code size */
+ scp->dram_size = MAX_CODE_SIZE;
+ scp->cpu_addr = dma_alloc_coherent(scp->dev, scp->dram_size,
+- &scp->phys_addr, GFP_KERNEL);
++ &scp->dma_addr, GFP_KERNEL);
+ if (!scp->cpu_addr)
+ return -ENOMEM;
+
+@@ -461,7 +461,7 @@ static int scp_map_memory_region(struct mtk_scp *scp)
+ static void scp_unmap_memory_region(struct mtk_scp *scp)
+ {
+ dma_free_coherent(scp->dev, scp->dram_size, scp->cpu_addr,
+- scp->phys_addr);
++ scp->dma_addr);
+ of_reserved_mem_device_release(scp->dev);
+ }
+
+diff --git a/drivers/soc/xilinx/Kconfig b/drivers/soc/xilinx/Kconfig
+index 223f1f9d0922..646512d7276f 100644
+--- a/drivers/soc/xilinx/Kconfig
++++ b/drivers/soc/xilinx/Kconfig
+@@ -19,7 +19,7 @@ config XILINX_VCU
+
+ config ZYNQMP_POWER
+ bool "Enable Xilinx Zynq MPSoC Power Management driver"
+- depends on PM && ARCH_ZYNQMP
++ depends on PM && ZYNQMP_FIRMWARE
+ default y
+ select MAILBOX
+ select ZYNQMP_IPI_MBOX
+@@ -35,7 +35,7 @@ config ZYNQMP_POWER
+ config ZYNQMP_PM_DOMAINS
+ bool "Enable Zynq MPSoC generic PM domains"
+ default y
+- depends on PM && ARCH_ZYNQMP && ZYNQMP_FIRMWARE
++ depends on PM && ZYNQMP_FIRMWARE
+ select PM_GENERIC_DOMAINS
+ help
+ Say yes to enable device power management through PM domains
+diff --git a/drivers/staging/gasket/apex_driver.c b/drivers/staging/gasket/apex_driver.c
+index 46199c8ca441..f12f81c8dd2f 100644
+--- a/drivers/staging/gasket/apex_driver.c
++++ b/drivers/staging/gasket/apex_driver.c
+@@ -570,13 +570,6 @@ static const struct pci_device_id apex_pci_ids[] = {
+ { PCI_DEVICE(APEX_PCI_VENDOR_ID, APEX_PCI_DEVICE_ID) }, { 0 }
+ };
+
+-static void apex_pci_fixup_class(struct pci_dev *pdev)
+-{
+- pdev->class = (PCI_CLASS_SYSTEM_OTHER << 8) | pdev->class;
+-}
+-DECLARE_PCI_FIXUP_CLASS_HEADER(APEX_PCI_VENDOR_ID, APEX_PCI_DEVICE_ID,
+- PCI_CLASS_NOT_DEFINED, 8, apex_pci_fixup_class);
+-
+ static int apex_pci_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *id)
+ {
+diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
+index 6b4b354c88aa..b5c970faf585 100644
+--- a/drivers/target/target_core_fabric_lib.c
++++ b/drivers/target/target_core_fabric_lib.c
+@@ -63,7 +63,7 @@ static int fc_get_pr_transport_id(
+ * encoded TransportID.
+ */
+ ptr = &se_nacl->initiatorname[0];
+- for (i = 0; i < 24; ) {
++ for (i = 0; i < 23; ) {
+ if (!strncmp(&ptr[i], ":", 1)) {
+ i++;
+ continue;
+diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
+index 0b9dfa6b17bc..f769bb1e3735 100644
+--- a/drivers/target/target_core_user.c
++++ b/drivers/target/target_core_user.c
+@@ -2073,6 +2073,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
+ mb->cmd_tail = 0;
+ mb->cmd_head = 0;
+ tcmu_flush_dcache_range(mb, sizeof(*mb));
++ clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
+
+ del_timer(&udev->cmd_timer);
+
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 9460d42f8675..c4be4631937a 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1728,7 +1728,6 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
+ u32 reg;
+
+ u8 link_state;
+- u8 speed;
+
+ /*
+ * According to the Databook Remote wakeup request should
+@@ -1738,16 +1737,13 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
+ */
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+
+- speed = reg & DWC3_DSTS_CONNECTSPD;
+- if ((speed == DWC3_DSTS_SUPERSPEED) ||
+- (speed == DWC3_DSTS_SUPERSPEED_PLUS))
+- return 0;
+-
+ link_state = DWC3_DSTS_USBLNKST(reg);
+
+ switch (link_state) {
++ case DWC3_LINK_STATE_RESET:
+ case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
+ case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
++ case DWC3_LINK_STATE_RESUME:
+ break;
+ default:
+ return -EINVAL;
+diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
+index 6e0432141c40..22200341c8ec 100644
+--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
++++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
+@@ -1951,10 +1951,10 @@ static irqreturn_t usba_vbus_irq_thread(int irq, void *devid)
+ usba_start(udc);
+ } else {
+ udc->suspended = false;
+- usba_stop(udc);
+-
+ if (udc->driver->disconnect)
+ udc->driver->disconnect(&udc->gadget);
++
++ usba_stop(udc);
+ }
+ udc->vbus_prev = vbus;
+ }
+diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
+index a4d9b5e1e50e..d49c6dc1082d 100644
+--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
++++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
+@@ -540,7 +540,7 @@ static void bdc_req_complete(struct bdc_ep *ep, struct bdc_req *req,
+ {
+ struct bdc *bdc = ep->bdc;
+
+- if (req == NULL || &req->queue == NULL || &req->usb_req == NULL)
++ if (req == NULL)
+ return;
+
+ dev_dbg(bdc->dev, "%s ep:%s status:%d\n", __func__, ep->name, status);
+diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
+index e17ca8156171..a38292ef79f6 100644
+--- a/drivers/xen/xenbus/xenbus_client.c
++++ b/drivers/xen/xenbus/xenbus_client.c
+@@ -448,7 +448,14 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
+ int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
+ unsigned int nr_grefs, void **vaddr)
+ {
+- return ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
++ int err;
++
++ err = ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
++ /* Some hypervisors are buggy and can return 1. */
++ if (err > 0)
++ err = GNTST_general_error;
++
++ return err;
+ }
+ EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
+
+diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
+index 6765949b3aab..380ad5ace7cf 100644
+--- a/fs/afs/cmservice.c
++++ b/fs/afs/cmservice.c
+@@ -169,7 +169,7 @@ static int afs_record_cm_probe(struct afs_call *call, struct afs_server *server)
+
+ spin_lock(&server->probe_lock);
+
+- if (!test_bit(AFS_SERVER_FL_HAVE_EPOCH, &server->flags)) {
++ if (!test_and_set_bit(AFS_SERVER_FL_HAVE_EPOCH, &server->flags)) {
+ server->cm_epoch = call->epoch;
+ server->probe.cm_epoch = call->epoch;
+ goto out;
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index ef732dd4e7ef..15ae9c7f9c00 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -1335,7 +1335,7 @@ extern struct afs_volume *afs_create_volume(struct afs_fs_context *);
+ extern void afs_activate_volume(struct afs_volume *);
+ extern void afs_deactivate_volume(struct afs_volume *);
+ extern void afs_put_volume(struct afs_cell *, struct afs_volume *);
+-extern int afs_check_volume_status(struct afs_volume *, struct key *);
++extern int afs_check_volume_status(struct afs_volume *, struct afs_fs_cursor *);
+
+ /*
+ * write.c
+diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c
+index 172ba569cd60..2a3305e42b14 100644
+--- a/fs/afs/rotate.c
++++ b/fs/afs/rotate.c
+@@ -192,7 +192,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
+ write_unlock(&vnode->volume->servers_lock);
+
+ set_bit(AFS_VOLUME_NEEDS_UPDATE, &vnode->volume->flags);
+- error = afs_check_volume_status(vnode->volume, fc->key);
++ error = afs_check_volume_status(vnode->volume, fc);
+ if (error < 0)
+ goto failed_set_error;
+
+@@ -281,7 +281,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
+
+ set_bit(AFS_VOLUME_WAIT, &vnode->volume->flags);
+ set_bit(AFS_VOLUME_NEEDS_UPDATE, &vnode->volume->flags);
+- error = afs_check_volume_status(vnode->volume, fc->key);
++ error = afs_check_volume_status(vnode->volume, fc);
+ if (error < 0)
+ goto failed_set_error;
+
+@@ -341,7 +341,7 @@ start:
+ /* See if we need to do an update of the volume record. Note that the
+ * volume may have moved or even have been deleted.
+ */
+- error = afs_check_volume_status(vnode->volume, fc->key);
++ error = afs_check_volume_status(vnode->volume, fc);
+ if (error < 0)
+ goto failed_set_error;
+
+diff --git a/fs/afs/server.c b/fs/afs/server.c
+index b7f3cb2130ca..11b90ac7ea30 100644
+--- a/fs/afs/server.c
++++ b/fs/afs/server.c
+@@ -594,12 +594,9 @@ retry:
+ }
+
+ ret = wait_on_bit(&server->flags, AFS_SERVER_FL_UPDATING,
+- TASK_INTERRUPTIBLE);
++ (fc->flags & AFS_FS_CURSOR_INTR) ?
++ TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+ if (ret == -ERESTARTSYS) {
+- if (!(fc->flags & AFS_FS_CURSOR_INTR) && server->addresses) {
+- _leave(" = t [intr]");
+- return true;
+- }
+ fc->error = ret;
+ _leave(" = f [intr]");
+ return false;
+diff --git a/fs/afs/volume.c b/fs/afs/volume.c
+index 92ca5e27573b..4310336b9bb8 100644
+--- a/fs/afs/volume.c
++++ b/fs/afs/volume.c
+@@ -281,7 +281,7 @@ error:
+ /*
+ * Make sure the volume record is up to date.
+ */
+-int afs_check_volume_status(struct afs_volume *volume, struct key *key)
++int afs_check_volume_status(struct afs_volume *volume, struct afs_fs_cursor *fc)
+ {
+ time64_t now = ktime_get_real_seconds();
+ int ret, retries = 0;
+@@ -299,7 +299,7 @@ retry:
+ }
+
+ if (!test_and_set_bit_lock(AFS_VOLUME_UPDATING, &volume->flags)) {
+- ret = afs_update_volume_status(volume, key);
++ ret = afs_update_volume_status(volume, fc->key);
+ clear_bit_unlock(AFS_VOLUME_WAIT, &volume->flags);
+ clear_bit_unlock(AFS_VOLUME_UPDATING, &volume->flags);
+ wake_up_bit(&volume->flags, AFS_VOLUME_WAIT);
+@@ -312,7 +312,9 @@ retry:
+ return 0;
+ }
+
+- ret = wait_on_bit(&volume->flags, AFS_VOLUME_WAIT, TASK_INTERRUPTIBLE);
++ ret = wait_on_bit(&volume->flags, AFS_VOLUME_WAIT,
++ (fc->flags & AFS_FS_CURSOR_INTR) ?
++ TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+ if (ret == -ERESTARTSYS) {
+ _leave(" = %d", ret);
+ return ret;
+diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
+index 83b6d67325f6..b5b45c57e1b1 100644
+--- a/fs/afs/yfsclient.c
++++ b/fs/afs/yfsclient.c
+@@ -165,15 +165,15 @@ static void xdr_dump_bad(const __be32 *bp)
+ int i;
+
+ pr_notice("YFS XDR: Bad status record\n");
+- for (i = 0; i < 5 * 4 * 4; i += 16) {
++ for (i = 0; i < 6 * 4 * 4; i += 16) {
+ memcpy(x, bp, 16);
+ bp += 4;
+ pr_notice("%03x: %08x %08x %08x %08x\n",
+ i, ntohl(x[0]), ntohl(x[1]), ntohl(x[2]), ntohl(x[3]));
+ }
+
+- memcpy(x, bp, 4);
+- pr_notice("0x50: %08x\n", ntohl(x[0]));
++ memcpy(x, bp, 8);
++ pr_notice("0x60: %08x %08x\n", ntohl(x[0]), ntohl(x[1]));
+ }
+
+ /*
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index f95ee99091e4..eab18b7b56e7 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -663,7 +663,7 @@ static int find_group_other(struct super_block *sb, struct inode *parent,
+ * block has been written back to disk. (Yes, these values are
+ * somewhat arbitrary...)
+ */
+-#define RECENTCY_MIN 5
++#define RECENTCY_MIN 60
+ #define RECENTCY_DIRTY 300
+
+ static int recently_deleted(struct super_block *sb, ext4_group_t group, int ino)
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 37f65ad0d823..4d3c81fd0902 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1974,7 +1974,7 @@ static int ext4_writepage(struct page *page,
+ bool keep_towrite = false;
+
+ if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
+- ext4_invalidatepage(page, 0, PAGE_SIZE);
++ inode->i_mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
+ unlock_page(page);
+ return -EIO;
+ }
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 51a78eb65f3c..2f7aebee1a7b 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1936,7 +1936,8 @@ void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
+ int free;
+
+ free = e4b->bd_info->bb_free;
+- BUG_ON(free <= 0);
++ if (WARN_ON(free <= 0))
++ return;
+
+ i = e4b->bd_info->bb_first_free;
+
+@@ -1959,7 +1960,8 @@ void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
+ }
+
+ mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
+- BUG_ON(ex.fe_len <= 0);
++ if (WARN_ON(ex.fe_len <= 0))
++ break;
+ if (free < ex.fe_len) {
+ ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
+ "%d free clusters as per "
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 16da3b3481a4..446158ab507d 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3610,7 +3610,8 @@ int ext4_calculate_overhead(struct super_block *sb)
+ */
+ if (sbi->s_journal && !sbi->journal_bdev)
+ overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
+- else if (ext4_has_feature_journal(sb) && !sbi->s_journal) {
++ else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) {
++ /* j_inum for internal journal is non-zero */
+ j_inode = ext4_get_journal_inode(sb, j_inum);
+ if (j_inode) {
+ j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 65cfe9ab47be..de9fbe7ed06c 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -267,6 +267,8 @@ find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
+ if (!nbl) {
+ nbl= kmalloc(sizeof(*nbl), GFP_KERNEL);
+ if (nbl) {
++ INIT_LIST_HEAD(&nbl->nbl_list);
++ INIT_LIST_HEAD(&nbl->nbl_lru);
+ fh_copy_shallow(&nbl->nbl_fh, fh);
+ locks_init_lock(&nbl->nbl_lock);
+ nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
+diff --git a/fs/pnode.c b/fs/pnode.c
+index 49f6d7ff2139..1106137c747a 100644
+--- a/fs/pnode.c
++++ b/fs/pnode.c
+@@ -261,14 +261,13 @@ static int propagate_one(struct mount *m)
+ child = copy_tree(last_source, last_source->mnt.mnt_root, type);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
++ read_seqlock_excl(&mount_lock);
+ mnt_set_mountpoint(m, mp, child);
++ if (m->mnt_master != dest_master)
++ SET_MNT_MARK(m->mnt_master);
++ read_sequnlock_excl(&mount_lock);
+ last_dest = m;
+ last_source = child;
+- if (m->mnt_master != dest_master) {
+- read_seqlock_excl(&mount_lock);
+- SET_MNT_MARK(m->mnt_master);
+- read_sequnlock_excl(&mount_lock);
+- }
+ hlist_add_head(&child->mnt_hash, list);
+ return count_mounts(m->mnt_ns, child);
+ }
+diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
+index edf43ddd7dce..7dd740e3692d 100644
+--- a/fs/ubifs/orphan.c
++++ b/fs/ubifs/orphan.c
+@@ -688,14 +688,14 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
+
+ ino_key_init(c, &key1, inum);
+ err = ubifs_tnc_lookup(c, &key1, ino);
+- if (err)
++ if (err && err != -ENOENT)
+ goto out_free;
+
+ /*
+ * Check whether an inode can really get deleted.
+ * linkat() with O_TMPFILE allows rebirth of an inode.
+ */
+- if (ino->nlink == 0) {
++ if (err == 0 && ino->nlink == 0) {
+ dbg_rcvry("deleting orphaned inode %lu",
+ (unsigned long)inum);
+
+diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
+index 8dc2e5414276..00932d2b503b 100644
+--- a/fs/xfs/xfs_icache.c
++++ b/fs/xfs/xfs_icache.c
+@@ -907,7 +907,12 @@ xfs_eofblocks_worker(
+ {
+ struct xfs_mount *mp = container_of(to_delayed_work(work),
+ struct xfs_mount, m_eofblocks_work);
++
++ if (!sb_start_write_trylock(mp->m_super))
++ return;
+ xfs_icache_free_eofblocks(mp, NULL);
++ sb_end_write(mp->m_super);
++
+ xfs_queue_eofblocks(mp);
+ }
+
+@@ -934,7 +939,12 @@ xfs_cowblocks_worker(
+ {
+ struct xfs_mount *mp = container_of(to_delayed_work(work),
+ struct xfs_mount, m_cowblocks_work);
++
++ if (!sb_start_write_trylock(mp->m_super))
++ return;
+ xfs_icache_free_cowblocks(mp, NULL);
++ sb_end_write(mp->m_super);
++
+ xfs_queue_cowblocks(mp);
+ }
+
+diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
+index d42de92cb283..4a99e0b0f333 100644
+--- a/fs/xfs/xfs_ioctl.c
++++ b/fs/xfs/xfs_ioctl.c
+@@ -2264,7 +2264,10 @@ xfs_file_ioctl(
+ if (error)
+ return error;
+
+- return xfs_icache_free_eofblocks(mp, &keofb);
++ sb_start_write(mp->m_super);
++ error = xfs_icache_free_eofblocks(mp, &keofb);
++ sb_end_write(mp->m_super);
++ return error;
+ }
+
+ default:
+diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
+index b0ce04ffd3cd..107bf2a2f344 100644
+--- a/fs/xfs/xfs_reflink.c
++++ b/fs/xfs/xfs_reflink.c
+@@ -1051,6 +1051,7 @@ xfs_reflink_remap_extent(
+ uirec.br_startblock = irec->br_startblock + rlen;
+ uirec.br_startoff = irec->br_startoff + rlen;
+ uirec.br_blockcount = unmap_len - rlen;
++ uirec.br_state = irec->br_state;
+ unmap_len = rlen;
+
+ /* If this isn't a real mapping, we're done. */
+diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
+index 00cc5b8734be..3bc570c90ad9 100644
+--- a/fs/xfs/xfs_trans_ail.c
++++ b/fs/xfs/xfs_trans_ail.c
+@@ -529,8 +529,9 @@ xfsaild(
+ {
+ struct xfs_ail *ailp = data;
+ long tout = 0; /* milliseconds */
++ unsigned int noreclaim_flag;
+
+- current->flags |= PF_MEMALLOC;
++ noreclaim_flag = memalloc_noreclaim_save();
+ set_freezable();
+
+ while (1) {
+@@ -601,6 +602,7 @@ xfsaild(
+ tout = xfsaild_push(ailp);
+ }
+
++ memalloc_noreclaim_restore(noreclaim_flag);
+ return 0;
+ }
+
+diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
+index 83439bfb6c5b..7613a84a2466 100644
+--- a/include/linux/irqchip/arm-gic-v3.h
++++ b/include/linux/irqchip/arm-gic-v3.h
+@@ -241,6 +241,7 @@
+
+ #define GICR_TYPER_PLPIS (1U << 0)
+ #define GICR_TYPER_VLPIS (1U << 1)
++#define GICR_TYPER_DIRTY (1U << 2)
+ #define GICR_TYPER_DirectLPIS (1U << 3)
+ #define GICR_TYPER_LAST (1U << 4)
+ #define GICR_TYPER_RVPEID (1U << 7)
+@@ -665,6 +666,7 @@ struct rdists {
+ bool has_vlpis;
+ bool has_rvpeid;
+ bool has_direct_lpi;
++ bool has_vpend_valid_dirty;
+ };
+
+ struct irq_domain;
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 352c0d708720..6693cf561cd1 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -2583,6 +2583,8 @@
+
+ #define PCI_VENDOR_ID_AMAZON 0x1d0f
+
++#define PCI_VENDOR_ID_ZHAOXIN 0x1d17
++
+ #define PCI_VENDOR_ID_HYGON 0x1d94
+
+ #define PCI_VENDOR_ID_HXT 0x1dbf
+diff --git a/include/linux/printk.h b/include/linux/printk.h
+index 1e6108b8d15f..e061635e0409 100644
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -202,7 +202,6 @@ __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...);
+ void dump_stack_print_info(const char *log_lvl);
+ void show_regs_print_info(const char *log_lvl);
+ extern asmlinkage void dump_stack(void) __cold;
+-extern void printk_safe_init(void);
+ extern void printk_safe_flush(void);
+ extern void printk_safe_flush_on_panic(void);
+ #else
+@@ -269,10 +268,6 @@ static inline void dump_stack(void)
+ {
+ }
+
+-static inline void printk_safe_init(void)
+-{
+-}
+-
+ static inline void printk_safe_flush(void)
+ {
+ }
+diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
+index 2dd0a9ed5b36..733fad7dfbed 100644
+--- a/include/linux/qed/qed_chain.h
++++ b/include/linux/qed/qed_chain.h
+@@ -97,6 +97,11 @@ struct qed_chain_u32 {
+ u32 cons_idx;
+ };
+
++struct addr_tbl_entry {
++ void *virt_addr;
++ dma_addr_t dma_map;
++};
++
+ struct qed_chain {
+ /* fastpath portion of the chain - required for commands such
+ * as produce / consume.
+@@ -107,10 +112,11 @@ struct qed_chain {
+
+ /* Fastpath portions of the PBL [if exists] */
+ struct {
+- /* Table for keeping the virtual addresses of the chain pages,
+- * respectively to the physical addresses in the pbl table.
++ /* Table for keeping the virtual and physical addresses of the
++ * chain pages, respectively to the physical addresses
++ * in the pbl table.
+ */
+- void **pp_virt_addr_tbl;
++ struct addr_tbl_entry *pp_addr_tbl;
+
+ union {
+ struct qed_chain_pbl_u16 u16;
+@@ -287,7 +293,7 @@ qed_chain_advance_page(struct qed_chain *p_chain,
+ *(u32 *)page_to_inc = 0;
+ page_index = *(u32 *)page_to_inc;
+ }
+- *p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index];
++ *p_next_elem = p_chain->pbl.pp_addr_tbl[page_index].virt_addr;
+ }
+ }
+
+@@ -537,7 +543,7 @@ static inline void qed_chain_init_params(struct qed_chain *p_chain,
+
+ p_chain->pbl_sp.p_phys_table = 0;
+ p_chain->pbl_sp.p_virt_table = NULL;
+- p_chain->pbl.pp_virt_addr_tbl = NULL;
++ p_chain->pbl.pp_addr_tbl = NULL;
+ }
+
+ /**
+@@ -575,11 +581,11 @@ static inline void qed_chain_init_mem(struct qed_chain *p_chain,
+ static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
+ void *p_virt_pbl,
+ dma_addr_t p_phys_pbl,
+- void **pp_virt_addr_tbl)
++ struct addr_tbl_entry *pp_addr_tbl)
+ {
+ p_chain->pbl_sp.p_phys_table = p_phys_pbl;
+ p_chain->pbl_sp.p_virt_table = p_virt_pbl;
+- p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
++ p_chain->pbl.pp_addr_tbl = pp_addr_tbl;
+ }
+
+ /**
+@@ -644,7 +650,7 @@ static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
+ break;
+ case QED_CHAIN_MODE_PBL:
+ last_page_idx = p_chain->page_cnt - 1;
+- p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx];
++ p_virt_addr = p_chain->pbl.pp_addr_tbl[last_page_idx].virt_addr;
+ break;
+ }
+ /* p_virt_addr points at this stage to the last page of the chain */
+@@ -716,7 +722,7 @@ static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
+ page_cnt = qed_chain_get_page_cnt(p_chain);
+
+ for (i = 0; i < page_cnt; i++)
+- memset(p_chain->pbl.pp_virt_addr_tbl[i], 0,
++ memset(p_chain->pbl.pp_addr_tbl[i].virt_addr, 0,
+ QED_CHAIN_PAGE_SIZE);
+ }
+
+diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
+index 40f65888dd38..fddad9f5b390 100644
+--- a/include/linux/sunrpc/svc_rdma.h
++++ b/include/linux/sunrpc/svc_rdma.h
+@@ -162,6 +162,7 @@ extern bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma);
+ extern void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
+ struct svc_rdma_recv_ctxt *ctxt);
+ extern void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma);
++extern void svc_rdma_release_rqst(struct svc_rqst *rqstp);
+ extern int svc_rdma_recvfrom(struct svc_rqst *);
+
+ /* svc_rdma_rw.c */
+diff --git a/include/sound/soc.h b/include/sound/soc.h
+index 8a2266676b2d..efb8bad7b0fa 100644
+--- a/include/sound/soc.h
++++ b/include/sound/soc.h
+@@ -1058,6 +1058,7 @@ struct snd_soc_card {
+ const struct snd_soc_dapm_route *of_dapm_routes;
+ int num_of_dapm_routes;
+ bool fully_routed;
++ bool disable_route_checks;
+
+ /* lists of probed devices belonging to this card */
+ struct list_head component_dev_list;
+diff --git a/include/trace/events/iocost.h b/include/trace/events/iocost.h
+index 7ecaa65b7106..c2f580fd371b 100644
+--- a/include/trace/events/iocost.h
++++ b/include/trace/events/iocost.h
+@@ -130,7 +130,7 @@ DEFINE_EVENT(iocg_inuse_update, iocost_inuse_reset,
+
+ TRACE_EVENT(iocost_ioc_vrate_adj,
+
+- TP_PROTO(struct ioc *ioc, u64 new_vrate, u32 (*missed_ppm)[2],
++ TP_PROTO(struct ioc *ioc, u64 new_vrate, u32 *missed_ppm,
+ u32 rq_wait_pct, int nr_lagging, int nr_shortages,
+ int nr_surpluses),
+
+@@ -155,8 +155,8 @@ TRACE_EVENT(iocost_ioc_vrate_adj,
+ __entry->old_vrate = atomic64_read(&ioc->vtime_rate);;
+ __entry->new_vrate = new_vrate;
+ __entry->busy_level = ioc->busy_level;
+- __entry->read_missed_ppm = (*missed_ppm)[READ];
+- __entry->write_missed_ppm = (*missed_ppm)[WRITE];
++ __entry->read_missed_ppm = missed_ppm[READ];
++ __entry->write_missed_ppm = missed_ppm[WRITE];
+ __entry->rq_wait_pct = rq_wait_pct;
+ __entry->nr_lagging = nr_lagging;
+ __entry->nr_shortages = nr_shortages;
+diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
+index c0e4c93324f5..fa14adf24235 100644
+--- a/include/trace/events/rpcrdma.h
++++ b/include/trace/events/rpcrdma.h
+@@ -1699,17 +1699,15 @@ DECLARE_EVENT_CLASS(svcrdma_sendcomp_event,
+
+ TRACE_EVENT(svcrdma_post_send,
+ TP_PROTO(
+- const struct ib_send_wr *wr,
+- int status
++ const struct ib_send_wr *wr
+ ),
+
+- TP_ARGS(wr, status),
++ TP_ARGS(wr),
+
+ TP_STRUCT__entry(
+ __field(const void *, cqe)
+ __field(unsigned int, num_sge)
+ __field(u32, inv_rkey)
+- __field(int, status)
+ ),
+
+ TP_fast_assign(
+@@ -1717,12 +1715,11 @@ TRACE_EVENT(svcrdma_post_send,
+ __entry->num_sge = wr->num_sge;
+ __entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
+ wr->ex.invalidate_rkey : 0;
+- __entry->status = status;
+ ),
+
+- TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x status=%d",
++ TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x",
+ __entry->cqe, __entry->num_sge,
+- __entry->inv_rkey, __entry->status
++ __entry->inv_rkey
+ )
+ );
+
+@@ -1787,26 +1784,23 @@ TRACE_EVENT(svcrdma_wc_receive,
+ TRACE_EVENT(svcrdma_post_rw,
+ TP_PROTO(
+ const void *cqe,
+- int sqecount,
+- int status
++ int sqecount
+ ),
+
+- TP_ARGS(cqe, sqecount, status),
++ TP_ARGS(cqe, sqecount),
+
+ TP_STRUCT__entry(
+ __field(const void *, cqe)
+ __field(int, sqecount)
+- __field(int, status)
+ ),
+
+ TP_fast_assign(
+ __entry->cqe = cqe;
+ __entry->sqecount = sqecount;
+- __entry->status = status;
+ ),
+
+- TP_printk("cqe=%p sqecount=%d status=%d",
+- __entry->cqe, __entry->sqecount, __entry->status
++ TP_printk("cqe=%p sqecount=%d",
++ __entry->cqe, __entry->sqecount
+ )
+ );
+
+@@ -1902,6 +1896,34 @@ DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
+ DEFINE_SQ_EVENT(full);
+ DEFINE_SQ_EVENT(retry);
+
++TRACE_EVENT(svcrdma_sq_post_err,
++ TP_PROTO(
++ const struct svcxprt_rdma *rdma,
++ int status
++ ),
++
++ TP_ARGS(rdma, status),
++
++ TP_STRUCT__entry(
++ __field(int, avail)
++ __field(int, depth)
++ __field(int, status)
++ __string(addr, rdma->sc_xprt.xpt_remotebuf)
++ ),
++
++ TP_fast_assign(
++ __entry->avail = atomic_read(&rdma->sc_sq_avail);
++ __entry->depth = rdma->sc_sq_depth;
++ __entry->status = status;
++ __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
++ ),
++
++ TP_printk("addr=%s sc_sq_avail=%d/%d status=%d",
++ __get_str(addr), __entry->avail, __entry->depth,
++ __entry->status
++ )
++);
++
+ #endif /* _TRACE_RPCRDMA_H */
+
+ #include <trace/define_trace.h>
+diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
+index 065218a20bb7..bbd4b42b76c7 100644
+--- a/include/uapi/linux/netfilter/nf_tables.h
++++ b/include/uapi/linux/netfilter/nf_tables.h
+@@ -276,6 +276,7 @@ enum nft_rule_compat_attributes {
+ * @NFT_SET_TIMEOUT: set uses timeouts
+ * @NFT_SET_EVAL: set can be updated from the evaluation path
+ * @NFT_SET_OBJECT: set contains stateful objects
++ * @NFT_SET_CONCAT: set contains a concatenation
+ */
+ enum nft_set_flags {
+ NFT_SET_ANONYMOUS = 0x1,
+@@ -285,6 +286,7 @@ enum nft_set_flags {
+ NFT_SET_TIMEOUT = 0x10,
+ NFT_SET_EVAL = 0x20,
+ NFT_SET_OBJECT = 0x40,
++ NFT_SET_CONCAT = 0x80,
+ };
+
+ /**
+diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
+index bbe791b24168..0e43f674a686 100644
+--- a/include/uapi/linux/pkt_sched.h
++++ b/include/uapi/linux/pkt_sched.h
+@@ -1197,8 +1197,8 @@ enum {
+ * [TCA_TAPRIO_ATTR_SCHED_ENTRY_INTERVAL]
+ */
+
+-#define TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST BIT(0)
+-#define TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD BIT(1)
++#define TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST _BITUL(0)
++#define TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD _BITUL(1)
+
+ enum {
+ TCA_TAPRIO_ATTR_UNSPEC,
+diff --git a/init/main.c b/init/main.c
+index ee4947af823f..9c7948b3763a 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -907,7 +907,6 @@ asmlinkage __visible void __init start_kernel(void)
+ boot_init_stack_canary();
+
+ time_init();
+- printk_safe_init();
+ perf_event_init();
+ profile_init();
+ call_function_init();
+diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
+index 70f71b154fa5..3fe0b006d2d2 100644
+--- a/kernel/bpf/cpumap.c
++++ b/kernel/bpf/cpumap.c
+@@ -469,7 +469,7 @@ static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
+ return -EOVERFLOW;
+
+ /* Make sure CPU is a valid possible cpu */
+- if (!cpu_possible(key_cpu))
++ if (key_cpu >= nr_cpumask_bits || !cpu_possible(key_cpu))
+ return -ENODEV;
+
+ if (qsize == 0) {
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index e5d12c54b552..1c53ccbd5b5d 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1918,6 +1918,15 @@ static bool register_is_const(struct bpf_reg_state *reg)
+ return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
+ }
+
++static bool __is_pointer_value(bool allow_ptr_leaks,
++ const struct bpf_reg_state *reg)
++{
++ if (allow_ptr_leaks)
++ return false;
++
++ return reg->type != SCALAR_VALUE;
++}
++
+ static void save_register_state(struct bpf_func_state *state,
+ int spi, struct bpf_reg_state *reg)
+ {
+@@ -2108,6 +2117,16 @@ static int check_stack_read(struct bpf_verifier_env *env,
+ * which resets stack/reg liveness for state transitions
+ */
+ state->regs[value_regno].live |= REG_LIVE_WRITTEN;
++ } else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
++ /* If value_regno==-1, the caller is asking us whether
++ * it is acceptable to use this value as a SCALAR_VALUE
++ * (e.g. for XADD).
++ * We must not allow unprivileged callers to do that
++ * with spilled pointers.
++ */
++ verbose(env, "leaking pointer from stack off %d\n",
++ off);
++ return -EACCES;
+ }
+ mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
+ } else {
+@@ -2473,15 +2492,6 @@ static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
+ return -EACCES;
+ }
+
+-static bool __is_pointer_value(bool allow_ptr_leaks,
+- const struct bpf_reg_state *reg)
+-{
+- if (allow_ptr_leaks)
+- return false;
+-
+- return reg->type != SCALAR_VALUE;
+-}
+-
+ static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
+ {
+ return cur_regs(env) + regno;
+@@ -2875,7 +2885,7 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
+ if (ret < 0)
+ return ret;
+
+- if (atype == BPF_READ) {
++ if (atype == BPF_READ && value_regno >= 0) {
+ if (ret == SCALAR_VALUE) {
+ mark_reg_unknown(env, regs, value_regno);
+ return 0;
+@@ -9882,6 +9892,7 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
+ return -EINVAL;
+ }
+ env->ops = bpf_verifier_ops[tgt_prog->type];
++ prog->expected_attach_type = tgt_prog->expected_attach_type;
+ }
+ if (!tgt_prog->jited) {
+ verbose(env, "Can attach to only JITed progs\n");
+@@ -10215,6 +10226,13 @@ err_release_maps:
+ * them now. Otherwise free_used_maps() will release them.
+ */
+ release_maps(env);
++
++ /* extension progs temporarily inherit the attach_type of their targets
++ for verification purposes, so set it back to zero before returning
++ */
++ if (env->prog->type == BPF_PROG_TYPE_EXT)
++ env->prog->expected_attach_type = 0;
++
+ *prog = env->prog;
+ err_unlock:
+ if (!is_priv)
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 533c19348189..29ace472f916 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -7278,10 +7278,17 @@ static void perf_event_task_output(struct perf_event *event,
+ goto out;
+
+ task_event->event_id.pid = perf_event_pid(event, task);
+- task_event->event_id.ppid = perf_event_pid(event, current);
+-
+ task_event->event_id.tid = perf_event_tid(event, task);
+- task_event->event_id.ptid = perf_event_tid(event, current);
++
++ if (task_event->event_id.header.type == PERF_RECORD_EXIT) {
++ task_event->event_id.ppid = perf_event_pid(event,
++ task->real_parent);
++ task_event->event_id.ptid = perf_event_pid(event,
++ task->real_parent);
++ } else { /* PERF_RECORD_FORK */
++ task_event->event_id.ppid = perf_event_pid(event, current);
++ task_event->event_id.ptid = perf_event_tid(event, current);
++ }
+
+ task_event->event_id.time = perf_event_clock(event);
+
+diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
+index c8e6ab689d42..b2b0f526f249 100644
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -23,6 +23,9 @@ __printf(1, 0) int vprintk_func(const char *fmt, va_list args);
+ void __printk_safe_enter(void);
+ void __printk_safe_exit(void);
+
++void printk_safe_init(void);
++bool printk_percpu_data_ready(void);
++
+ #define printk_safe_enter_irqsave(flags) \
+ do { \
+ local_irq_save(flags); \
+@@ -64,4 +67,6 @@ __printf(1, 0) int vprintk_func(const char *fmt, va_list args) { return 0; }
+ #define printk_safe_enter_irq() local_irq_disable()
+ #define printk_safe_exit_irq() local_irq_enable()
+
++static inline void printk_safe_init(void) { }
++static inline bool printk_percpu_data_ready(void) { return false; }
+ #endif /* CONFIG_PRINTK */
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index fada22dc4ab6..74fbd76cf664 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -460,6 +460,18 @@ static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
+ static char *log_buf = __log_buf;
+ static u32 log_buf_len = __LOG_BUF_LEN;
+
++/*
++ * We cannot access per-CPU data (e.g. per-CPU flush irq_work) before
++ * per_cpu_areas are initialised. This variable is set to true when
++ * it's safe to access per-CPU data.
++ */
++static bool __printk_percpu_data_ready __read_mostly;
++
++bool printk_percpu_data_ready(void)
++{
++ return __printk_percpu_data_ready;
++}
++
+ /* Return log buffer address */
+ char *log_buf_addr_get(void)
+ {
+@@ -1146,12 +1158,28 @@ static void __init log_buf_add_cpu(void)
+ static inline void log_buf_add_cpu(void) {}
+ #endif /* CONFIG_SMP */
+
++static void __init set_percpu_data_ready(void)
++{
++ printk_safe_init();
++ /* Make sure we set this flag only after printk_safe() init is done */
++ barrier();
++ __printk_percpu_data_ready = true;
++}
++
+ void __init setup_log_buf(int early)
+ {
+ unsigned long flags;
+ char *new_log_buf;
+ unsigned int free;
+
++ /*
++ * Some archs call setup_log_buf() multiple times - first is very
++ * early, e.g. from setup_arch(), and second - when percpu_areas
++ * are initialised.
++ */
++ if (!early)
++ set_percpu_data_ready();
++
+ if (log_buf != __log_buf)
+ return;
+
+@@ -2966,6 +2994,9 @@ static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
+
+ void wake_up_klogd(void)
+ {
++ if (!printk_percpu_data_ready())
++ return;
++
+ preempt_disable();
+ if (waitqueue_active(&log_wait)) {
+ this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
+@@ -2976,6 +3007,9 @@ void wake_up_klogd(void)
+
+ void defer_console_output(void)
+ {
++ if (!printk_percpu_data_ready())
++ return;
++
+ preempt_disable();
+ __this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT);
+ irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
+diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
+index b4045e782743..d9a659a686f3 100644
+--- a/kernel/printk/printk_safe.c
++++ b/kernel/printk/printk_safe.c
+@@ -27,7 +27,6 @@
+ * There are situations when we want to make sure that all buffers
+ * were handled or when IRQs are blocked.
+ */
+-static int printk_safe_irq_ready __read_mostly;
+
+ #define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) - \
+ sizeof(atomic_t) - \
+@@ -51,7 +50,7 @@ static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
+ /* Get flushed in a more safe context. */
+ static void queue_flush_work(struct printk_safe_seq_buf *s)
+ {
+- if (printk_safe_irq_ready)
++ if (printk_percpu_data_ready())
+ irq_work_queue(&s->work);
+ }
+
+@@ -402,14 +401,6 @@ void __init printk_safe_init(void)
+ #endif
+ }
+
+- /*
+- * In the highly unlikely event that a NMI were to trigger at
+- * this moment. Make sure IRQ work is set up before this
+- * variable is set.
+- */
+- barrier();
+- printk_safe_irq_ready = 1;
+-
+ /* Flush pending messages that did not have scheduled IRQ works. */
+ printk_safe_flush();
+ }
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index da8a19470218..3dd675697301 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1239,13 +1239,8 @@ static void uclamp_fork(struct task_struct *p)
+ return;
+
+ for_each_clamp_id(clamp_id) {
+- unsigned int clamp_value = uclamp_none(clamp_id);
+-
+- /* By default, RT tasks always get 100% boost */
+- if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
+- clamp_value = uclamp_none(UCLAMP_MAX);
+-
+- uclamp_se_set(&p->uclamp_req[clamp_id], clamp_value, false);
++ uclamp_se_set(&p->uclamp_req[clamp_id],
++ uclamp_none(clamp_id), false);
+ }
+ }
+
+diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
+index dac9104d126f..ff9435dee1df 100644
+--- a/kernel/sched/cputime.c
++++ b/kernel/sched/cputime.c
+@@ -1003,12 +1003,12 @@ u64 kcpustat_field(struct kernel_cpustat *kcpustat,
+ enum cpu_usage_stat usage, int cpu)
+ {
+ u64 *cpustat = kcpustat->cpustat;
++ u64 val = cpustat[usage];
+ struct rq *rq;
+- u64 val;
+ int err;
+
+ if (!vtime_accounting_enabled_cpu(cpu))
+- return cpustat[usage];
++ return val;
+
+ rq = cpu_rq(cpu);
+
+diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c
+index 008d6ac2342b..808244f3ddd9 100644
+--- a/kernel/sched/isolation.c
++++ b/kernel/sched/isolation.c
+@@ -149,6 +149,9 @@ __setup("nohz_full=", housekeeping_nohz_full_setup);
+ static int __init housekeeping_isolcpus_setup(char *str)
+ {
+ unsigned int flags = 0;
++ bool illegal = false;
++ char *par;
++ int len;
+
+ while (isalpha(*str)) {
+ if (!strncmp(str, "nohz,", 5)) {
+@@ -169,8 +172,22 @@ static int __init housekeeping_isolcpus_setup(char *str)
+ continue;
+ }
+
+- pr_warn("isolcpus: Error, unknown flag\n");
+- return 0;
++ /*
++ * Skip unknown sub-parameter and validate that it is not
++ * containing an invalid character.
++ */
++ for (par = str, len = 0; *str && *str != ','; str++, len++) {
++ if (!isalpha(*str) && *str != '_')
++ illegal = true;
++ }
++
++ if (illegal) {
++ pr_warn("isolcpus: Invalid flag %.*s\n", len, par);
++ return 0;
++ }
++
++ pr_info("isolcpus: Skipped unknown flag %.*s\n", len, par);
++ str++;
+ }
+
+ /* Default behaviour for isolcpus without flags */
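
Editor's note: with the hunk above, an unknown isolcpus= sub-parameter no longer aborts parsing; the token is scanned, rejected only if it contains something other than letters or '_', and otherwise skipped with a notice so the trailing CPU list is still honoured. A stand-alone sketch of that scan, reduced to a single known flag ("nohz,") purely for illustration:

#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static int parse_isolcpus(const char *str)
{
	while (isalpha((unsigned char)*str)) {
		if (!strncmp(str, "nohz,", 5)) {        /* a known flag */
			printf("flag: nohz\n");
			str += 5;
			continue;
		}

		/* Unknown sub-parameter: skip it unless it is malformed. */
		const char *par = str;
		int len = 0;
		bool illegal = false;

		for (; *str && *str != ','; str++, len++)
			if (!isalpha((unsigned char)*str) && *str != '_')
				illegal = true;

		if (illegal) {
			printf("invalid flag %.*s\n", len, par);
			return -1;
		}
		printf("skipped unknown flag %.*s\n", len, par);
		if (*str == ',')
			str++;
	}

	printf("cpu list: %s\n", str);
	return 0;
}

int main(void)
{
	return parse_isolcpus("nohz,managed_irq,1-3");
}

Feeding it "nohz,managed_irq,1-3" reports nohz as known, skips managed_irq, and leaves "1-3" for the CPU-list parser, which is the behaviour the patch restores for kernels that do not recognise a newer flag.
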
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 7938c60e11dd..9abf962bbde4 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -1510,15 +1510,15 @@ int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
+ unsigned long flags;
+ int ret = -EINVAL;
+
++ if (!valid_signal(sig))
++ return ret;
++
+ clear_siginfo(&info);
+ info.si_signo = sig;
+ info.si_errno = errno;
+ info.si_code = SI_ASYNCIO;
+ *((sigval_t *)&info.si_pid) = addr;
+
+- if (!valid_signal(sig))
+- return ret;
+-
+ rcu_read_lock();
+ p = pid_task(pid, PIDTYPE_PID);
+ if (!p) {
+diff --git a/mm/shmem.c b/mm/shmem.c
+index aad3ba74b0e9..7406f91f8a52 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -2404,11 +2404,11 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
+
+ lru_cache_add_anon(page);
+
+- spin_lock(&info->lock);
++ spin_lock_irq(&info->lock);
+ info->alloced++;
+ inode->i_blocks += BLOCKS_PER_PAGE;
+ shmem_recalc_inode(inode);
+- spin_unlock(&info->lock);
++ spin_unlock_irq(&info->lock);
+
+ inc_mm_counter(dst_mm, mm_counter_file(page));
+ page_add_file_rmap(page, false);
+diff --git a/net/core/datagram.c b/net/core/datagram.c
+index a78e7f864c1e..56f0ccf677a5 100644
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -51,6 +51,7 @@
+ #include <linux/slab.h>
+ #include <linux/pagemap.h>
+ #include <linux/uio.h>
++#include <linux/indirect_call_wrapper.h>
+
+ #include <net/protocol.h>
+ #include <linux/skbuff.h>
+@@ -414,6 +415,11 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
+ }
+ EXPORT_SYMBOL(skb_kill_datagram);
+
++INDIRECT_CALLABLE_DECLARE(static size_t simple_copy_to_iter(const void *addr,
++ size_t bytes,
++ void *data __always_unused,
++ struct iov_iter *i));
++
+ static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
+ struct iov_iter *to, int len, bool fault_short,
+ size_t (*cb)(const void *, size_t, void *,
+@@ -427,7 +433,8 @@ static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
+ if (copy > 0) {
+ if (copy > len)
+ copy = len;
+- n = cb(skb->data + offset, copy, data, to);
++ n = INDIRECT_CALL_1(cb, simple_copy_to_iter,
++ skb->data + offset, copy, data, to);
+ offset += n;
+ if (n != copy)
+ goto short_copy;
+@@ -449,8 +456,9 @@ static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
+
+ if (copy > len)
+ copy = len;
+- n = cb(vaddr + skb_frag_off(frag) + offset - start,
+- copy, data, to);
++ n = INDIRECT_CALL_1(cb, simple_copy_to_iter,
++ vaddr + skb_frag_off(frag) + offset - start,
++ copy, data, to);
+ kunmap(page);
+ offset += n;
+ if (n != copy)
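
Editor's note: the datagram hunks wrap the copy callback in INDIRECT_CALL_1() so the common simple_copy_to_iter case becomes a direct call instead of a retpoline-penalised indirect one. The kernel macro (include/linux/indirect_call_wrapper.h) simply compares the pointer with the named candidate, wrapped in likely(). A reduced userspace rendition of the idea; plain_copy/counting_copy are invented callbacks for the demo:

#include <stdio.h>
#include <string.h>

/* Compare-and-branch wrapper: direct call when the pointer is the
 * expected function, indirect call otherwise. */
#define INDIRECT_CALL_1(f, f1, ...) \
	((f) == (f1) ? f1(__VA_ARGS__) : (f)(__VA_ARGS__))

static size_t plain_copy(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return len;
}

static size_t counting_copy(void *dst, const void *src, size_t len)
{
	(void)dst; (void)src;          /* e.g. a checksum-only path */
	return len;
}

int main(void)
{
	char buf[8];
	size_t (*cb)(void *, const void *, size_t) = plain_copy;

	/* Common case: compiles down to a direct call to plain_copy(). */
	printf("%zu\n", INDIRECT_CALL_1(cb, plain_copy, buf, "hello", 5));

	/* Uncommon case: falls back to a genuine indirect call. */
	cb = counting_copy;
	printf("%zu\n", INDIRECT_CALL_1(cb, plain_copy, buf, "hello", 5));
	return 0;
}
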
+diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
+index d09b3c789314..36978a0e5000 100644
+--- a/net/mac80211/mesh.c
++++ b/net/mac80211/mesh.c
+@@ -1257,15 +1257,15 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
+ sdata->u.mesh.mshcfg.rssi_threshold < rx_status->signal)
+ mesh_neighbour_update(sdata, mgmt->sa, &elems,
+ rx_status);
++
++ if (ifmsh->csa_role != IEEE80211_MESH_CSA_ROLE_INIT &&
++ !sdata->vif.csa_active)
++ ieee80211_mesh_process_chnswitch(sdata, &elems, true);
+ }
+
+ if (ifmsh->sync_ops)
+ ifmsh->sync_ops->rx_bcn_presp(sdata,
+ stype, mgmt, &elems, rx_status);
+-
+- if (ifmsh->csa_role != IEEE80211_MESH_CSA_ROLE_INIT &&
+- !sdata->vif.csa_active)
+- ieee80211_mesh_process_chnswitch(sdata, &elems, true);
+ }
+
+ int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata)
+@@ -1373,6 +1373,9 @@ static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata,
+ ieee802_11_parse_elems(pos, len - baselen, true, &elems,
+ mgmt->bssid, NULL);
+
++ if (!mesh_matches_local(sdata, &elems))
++ return;
++
+ ifmsh->chsw_ttl = elems.mesh_chansw_params_ie->mesh_ttl;
+ if (!--ifmsh->chsw_ttl)
+ fwd_csa = false;
+diff --git a/net/netfilter/nf_nat_proto.c b/net/netfilter/nf_nat_proto.c
+index 64eedc17037a..3d816a1e5442 100644
+--- a/net/netfilter/nf_nat_proto.c
++++ b/net/netfilter/nf_nat_proto.c
+@@ -1035,8 +1035,8 @@ int nf_nat_inet_register_fn(struct net *net, const struct nf_hook_ops *ops)
+ ret = nf_nat_register_fn(net, NFPROTO_IPV4, ops, nf_nat_ipv4_ops,
+ ARRAY_SIZE(nf_nat_ipv4_ops));
+ if (ret)
+- nf_nat_ipv6_unregister_fn(net, ops);
+-
++ nf_nat_unregister_fn(net, NFPROTO_IPV6, ops,
++ ARRAY_SIZE(nf_nat_ipv6_ops));
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(nf_nat_inet_register_fn);
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 68ec31c4ae65..116178d373a1 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -3949,7 +3949,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
+ if (flags & ~(NFT_SET_ANONYMOUS | NFT_SET_CONSTANT |
+ NFT_SET_INTERVAL | NFT_SET_TIMEOUT |
+ NFT_SET_MAP | NFT_SET_EVAL |
+- NFT_SET_OBJECT))
++ NFT_SET_OBJECT | NFT_SET_CONCAT))
+ return -EOPNOTSUPP;
+ /* Only one of these operations is supported */
+ if ((flags & (NFT_SET_MAP | NFT_SET_OBJECT)) ==
+diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
+index a6c1349e965d..01135e54d95d 100644
+--- a/net/rxrpc/local_object.c
++++ b/net/rxrpc/local_object.c
+@@ -165,15 +165,6 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
+ goto error;
+ }
+
+- /* we want to set the don't fragment bit */
+- opt = IPV6_PMTUDISC_DO;
+- ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
+- (char *) &opt, sizeof(opt));
+- if (ret < 0) {
+- _debug("setsockopt failed");
+- goto error;
+- }
+-
+ /* Fall through and set IPv4 options too otherwise we don't get
+ * errors from IPv4 packets sent through the IPv6 socket.
+ */
+diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
+index bad3d2420344..90e263c6aa69 100644
+--- a/net/rxrpc/output.c
++++ b/net/rxrpc/output.c
+@@ -474,41 +474,21 @@ send_fragmentable:
+ skb->tstamp = ktime_get_real();
+
+ switch (conn->params.local->srx.transport.family) {
++ case AF_INET6:
+ case AF_INET:
+ opt = IP_PMTUDISC_DONT;
+- ret = kernel_setsockopt(conn->params.local->socket,
+- SOL_IP, IP_MTU_DISCOVER,
+- (char *)&opt, sizeof(opt));
+- if (ret == 0) {
+- ret = kernel_sendmsg(conn->params.local->socket, &msg,
+- iov, 2, len);
+- conn->params.peer->last_tx_at = ktime_get_seconds();
+-
+- opt = IP_PMTUDISC_DO;
+- kernel_setsockopt(conn->params.local->socket, SOL_IP,
+- IP_MTU_DISCOVER,
+- (char *)&opt, sizeof(opt));
+- }
+- break;
+-
+-#ifdef CONFIG_AF_RXRPC_IPV6
+- case AF_INET6:
+- opt = IPV6_PMTUDISC_DONT;
+- ret = kernel_setsockopt(conn->params.local->socket,
+- SOL_IPV6, IPV6_MTU_DISCOVER,
+- (char *)&opt, sizeof(opt));
+- if (ret == 0) {
+- ret = kernel_sendmsg(conn->params.local->socket, &msg,
+- iov, 2, len);
+- conn->params.peer->last_tx_at = ktime_get_seconds();
+-
+- opt = IPV6_PMTUDISC_DO;
+- kernel_setsockopt(conn->params.local->socket,
+- SOL_IPV6, IPV6_MTU_DISCOVER,
+- (char *)&opt, sizeof(opt));
+- }
++ kernel_setsockopt(conn->params.local->socket,
++ SOL_IP, IP_MTU_DISCOVER,
++ (char *)&opt, sizeof(opt));
++ ret = kernel_sendmsg(conn->params.local->socket, &msg,
++ iov, 2, len);
++ conn->params.peer->last_tx_at = ktime_get_seconds();
++
++ opt = IP_PMTUDISC_DO;
++ kernel_setsockopt(conn->params.local->socket,
++ SOL_IP, IP_MTU_DISCOVER,
++ (char *)&opt, sizeof(opt));
+ break;
+-#endif
+
+ default:
+ BUG();
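
Editor's note: the rxrpc hunk above collapses the IPv4 and IPv6 branches of the fragmentable-send path into one sequence: clear DF via IP_MTU_DISCOVER, send, then switch path-MTU discovery back on. The same toggle is available to ordinary userspace sockets; a small sketch follows (the destination and payload size are arbitrary, and IPPROTO_IP is used as the portable spelling of the kernel's SOL_IP):

#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int opt;

	/* Clear DF so the kernel may fragment this oversized datagram. */
	opt = IP_PMTUDISC_DONT;
	setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &opt, sizeof(opt));

	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port = htons(9),                 /* discard port */
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};
	char payload[4000];                           /* larger than most MTUs */
	memset(payload, 'x', sizeof(payload));
	sendto(fd, payload, sizeof(payload), 0,
	       (struct sockaddr *)&dst, sizeof(dst));

	/* Restore path-MTU discovery (DF set) for normal traffic. */
	opt = IP_PMTUDISC_DO;
	setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &opt, sizeof(opt));

	close(fd);
	return 0;
}
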
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index 298557744818..dc74519286be 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -897,9 +897,6 @@ int svc_send(struct svc_rqst *rqstp)
+ if (!xprt)
+ goto out;
+
+- /* release the receive skb before sending the reply */
+- xprt->xpt_ops->xpo_release_rqst(rqstp);
+-
+ /* calculate over-all length */
+ xb = &rqstp->rq_res;
+ xb->len = xb->head[0].iov_len +
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index 2934dd711715..4260924ad9db 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -605,6 +605,8 @@ svc_udp_sendto(struct svc_rqst *rqstp)
+ {
+ int error;
+
++ svc_release_udp_skb(rqstp);
++
+ error = svc_sendto(rqstp, &rqstp->rq_res);
+ if (error == -ECONNREFUSED)
+ /* ICMP error on earlier request. */
+@@ -1137,6 +1139,8 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
+ int sent;
+ __be32 reclen;
+
++ svc_release_skb(rqstp);
++
+ /* Set up the first element of the reply kvec.
+ * Any other kvecs that may be in use have been taken
+ * care of by the server implementation itself.
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+index 96bccd398469..b8ee91ffedda 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+@@ -222,6 +222,26 @@ void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
+ svc_rdma_recv_ctxt_destroy(rdma, ctxt);
+ }
+
++/**
++ * svc_rdma_release_rqst - Release transport-specific per-rqst resources
++ * @rqstp: svc_rqst being released
++ *
++ * Ensure that the recv_ctxt is released whether or not a Reply
++ * was sent. For example, the client could close the connection,
++ * or svc_process could drop an RPC, before the Reply is sent.
++ */
++void svc_rdma_release_rqst(struct svc_rqst *rqstp)
++{
++ struct svc_rdma_recv_ctxt *ctxt = rqstp->rq_xprt_ctxt;
++ struct svc_xprt *xprt = rqstp->rq_xprt;
++ struct svcxprt_rdma *rdma =
++ container_of(xprt, struct svcxprt_rdma, sc_xprt);
++
++ rqstp->rq_xprt_ctxt = NULL;
++ if (ctxt)
++ svc_rdma_recv_ctxt_put(rdma, ctxt);
++}
++
+ static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma,
+ struct svc_rdma_recv_ctxt *ctxt)
+ {
+@@ -756,6 +776,8 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
+ __be32 *p;
+ int ret;
+
++ rqstp->rq_xprt_ctxt = NULL;
++
+ spin_lock(&rdma_xprt->sc_rq_dto_lock);
+ ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_read_complete_q);
+ if (ctxt) {
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
+index 48fe3b16b0d9..a59912e2666d 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
+@@ -323,8 +323,6 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
+ if (atomic_sub_return(cc->cc_sqecount,
+ &rdma->sc_sq_avail) > 0) {
+ ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
+- trace_svcrdma_post_rw(&cc->cc_cqe,
+- cc->cc_sqecount, ret);
+ if (ret)
+ break;
+ return 0;
+@@ -337,6 +335,7 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
+ trace_svcrdma_sq_retry(rdma);
+ } while (1);
+
++ trace_svcrdma_sq_post_err(rdma, ret);
+ set_bit(XPT_CLOSE, &xprt->xpt_flags);
+
+ /* If even one was posted, there will be a completion. */
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+index f3f108090aa4..9f234d1f3b3d 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+@@ -310,15 +310,17 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
+ }
+
+ svc_xprt_get(&rdma->sc_xprt);
++ trace_svcrdma_post_send(wr);
+ ret = ib_post_send(rdma->sc_qp, wr, NULL);
+- trace_svcrdma_post_send(wr, ret);
+- if (ret) {
+- set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
+- svc_xprt_put(&rdma->sc_xprt);
+- wake_up(&rdma->sc_send_wait);
+- }
+- break;
++ if (ret)
++ break;
++ return 0;
+ }
++
++ trace_svcrdma_sq_post_err(rdma, ret);
++ set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
++ svc_xprt_put(&rdma->sc_xprt);
++ wake_up(&rdma->sc_send_wait);
+ return ret;
+ }
+
+@@ -875,12 +877,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
+ wr_lst, rp_ch);
+ if (ret < 0)
+ goto err1;
+- ret = 0;
+-
+-out:
+- rqstp->rq_xprt_ctxt = NULL;
+- svc_rdma_recv_ctxt_put(rdma, rctxt);
+- return ret;
++ return 0;
+
+ err2:
+ if (ret != -E2BIG && ret != -EINVAL)
+@@ -889,14 +886,12 @@ out:
+ ret = svc_rdma_send_error_msg(rdma, sctxt, rqstp);
+ if (ret < 0)
+ goto err1;
+- ret = 0;
+- goto out;
++ return 0;
+
+ err1:
+ svc_rdma_send_ctxt_put(rdma, sctxt);
+ err0:
+ trace_svcrdma_send_failed(rqstp, ret);
+ set_bit(XPT_CLOSE, &xprt->xpt_flags);
+- ret = -ENOTCONN;
+- goto out;
++ return -ENOTCONN;
+ }
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+index 145a3615c319..889220f11a70 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+@@ -71,7 +71,6 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
+ struct sockaddr *sa, int salen,
+ int flags);
+ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
+-static void svc_rdma_release_rqst(struct svc_rqst *);
+ static void svc_rdma_detach(struct svc_xprt *xprt);
+ static void svc_rdma_free(struct svc_xprt *xprt);
+ static int svc_rdma_has_wspace(struct svc_xprt *xprt);
+@@ -558,10 +557,6 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
+ return NULL;
+ }
+
+-static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
+-{
+-}
+-
+ /*
+ * When connected, an svc_xprt has at least two references:
+ *
+diff --git a/net/tipc/link.c b/net/tipc/link.c
+index 467c53a1fb5c..d4675e922a8f 100644
+--- a/net/tipc/link.c
++++ b/net/tipc/link.c
+@@ -1065,7 +1065,7 @@ static void tipc_link_update_cwin(struct tipc_link *l, int released,
+ /* Enter fast recovery */
+ if (unlikely(retransmitted)) {
+ l->ssthresh = max_t(u16, l->window / 2, 300);
+- l->window = l->ssthresh;
++ l->window = min_t(u16, l->ssthresh, l->window);
+ return;
+ }
+ /* Enter slow start */
+diff --git a/net/tipc/msg.h b/net/tipc/msg.h
+index 6d466ebdb64f..871feadbbc19 100644
+--- a/net/tipc/msg.h
++++ b/net/tipc/msg.h
+@@ -394,6 +394,11 @@ static inline u32 msg_connected(struct tipc_msg *m)
+ return msg_type(m) == TIPC_CONN_MSG;
+ }
+
++static inline u32 msg_direct(struct tipc_msg *m)
++{
++ return msg_type(m) == TIPC_DIRECT_MSG;
++}
++
+ static inline u32 msg_errcode(struct tipc_msg *m)
+ {
+ return msg_bits(m, 1, 25, 0xf);
+diff --git a/net/tipc/node.c b/net/tipc/node.c
+index d50be9a3d479..803a3a6d0f50 100644
+--- a/net/tipc/node.c
++++ b/net/tipc/node.c
+@@ -1586,7 +1586,8 @@ static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
+ case TIPC_MEDIUM_IMPORTANCE:
+ case TIPC_HIGH_IMPORTANCE:
+ case TIPC_CRITICAL_IMPORTANCE:
+- if (msg_connected(hdr) || msg_named(hdr)) {
++ if (msg_connected(hdr) || msg_named(hdr) ||
++ msg_direct(hdr)) {
+ tipc_loopback_trace(peer_net, list);
+ spin_lock_init(&list->lock);
+ tipc_sk_rcv(peer_net, list);
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index 693e8902161e..87466607097f 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -1461,7 +1461,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
+ }
+
+ __skb_queue_head_init(&pkts);
+- mtu = tipc_node_get_mtu(net, dnode, tsk->portid, false);
++ mtu = tipc_node_get_mtu(net, dnode, tsk->portid, true);
+ rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
+ if (unlikely(rc != dlen))
+ return rc;
+diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
+index 752ff0a225a9..f24ff5a903ae 100644
+--- a/scripts/Makefile.lib
++++ b/scripts/Makefile.lib
+@@ -308,7 +308,7 @@ define rule_dtc
+ endef
+
+ $(obj)/%.dt.yaml: $(src)/%.dts $(DTC) $(DT_TMP_SCHEMA) FORCE
+- $(call if_changed_rule,dtc)
++ $(call if_changed_rule,dtc,yaml)
+
+ dtc-tmp = $(subst $(comma),_,$(dot-target).dts.tmp)
+
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index af21e9583c0d..59b60b1f26f8 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -1203,10 +1203,8 @@ static void azx_vs_set_state(struct pci_dev *pci,
+ if (!disabled) {
+ dev_info(chip->card->dev,
+ "Start delayed initialization\n");
+- if (azx_probe_continue(chip) < 0) {
++ if (azx_probe_continue(chip) < 0)
+ dev_err(chip->card->dev, "initialization error\n");
+- hda->init_failed = true;
+- }
+ }
+ } else {
+ dev_info(chip->card->dev, "%s via vga_switcheroo\n",
+@@ -1339,12 +1337,15 @@ static int register_vga_switcheroo(struct azx *chip)
+ /*
+ * destructor
+ */
+-static int azx_free(struct azx *chip)
++static void azx_free(struct azx *chip)
+ {
+ struct pci_dev *pci = chip->pci;
+ struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
+ struct hdac_bus *bus = azx_bus(chip);
+
++ if (hda->freed)
++ return;
++
+ if (azx_has_pm_runtime(chip) && chip->running)
+ pm_runtime_get_noresume(&pci->dev);
+ chip->running = 0;
+@@ -1388,9 +1389,8 @@ static int azx_free(struct azx *chip)
+
+ if (chip->driver_caps & AZX_DCAPS_I915_COMPONENT)
+ snd_hdac_i915_exit(bus);
+- kfree(hda);
+
+- return 0;
++ hda->freed = 1;
+ }
+
+ static int azx_dev_disconnect(struct snd_device *device)
+@@ -1406,7 +1406,8 @@ static int azx_dev_disconnect(struct snd_device *device)
+
+ static int azx_dev_free(struct snd_device *device)
+ {
+- return azx_free(device->device_data);
++ azx_free(device->device_data);
++ return 0;
+ }
+
+ #ifdef SUPPORT_VGA_SWITCHEROO
+@@ -1773,7 +1774,7 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
+ if (err < 0)
+ return err;
+
+- hda = kzalloc(sizeof(*hda), GFP_KERNEL);
++ hda = devm_kzalloc(&pci->dev, sizeof(*hda), GFP_KERNEL);
+ if (!hda) {
+ pci_disable_device(pci);
+ return -ENOMEM;
+@@ -1814,7 +1815,6 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
+
+ err = azx_bus_init(chip, model[dev]);
+ if (err < 0) {
+- kfree(hda);
+ pci_disable_device(pci);
+ return err;
+ }
+@@ -2009,7 +2009,7 @@ static int azx_first_init(struct azx *chip)
+ /* codec detection */
+ if (!azx_bus(chip)->codec_mask) {
+ dev_err(card->dev, "no codecs found!\n");
+- return -ENODEV;
++ /* keep running the rest for the runtime PM */
+ }
+
+ if (azx_acquire_irq(chip, 0) < 0)
+@@ -2302,9 +2302,11 @@ static int azx_probe_continue(struct azx *chip)
+ #endif
+
+ /* create codec instances */
+- err = azx_probe_codecs(chip, azx_max_codecs[chip->driver_type]);
+- if (err < 0)
+- goto out_free;
++ if (bus->codec_mask) {
++ err = azx_probe_codecs(chip, azx_max_codecs[chip->driver_type]);
++ if (err < 0)
++ goto out_free;
++ }
+
+ #ifdef CONFIG_SND_HDA_PATCH_LOADER
+ if (chip->fw) {
+@@ -2318,7 +2320,7 @@ static int azx_probe_continue(struct azx *chip)
+ #endif
+ }
+ #endif
+- if ((probe_only[dev] & 1) == 0) {
++ if (bus->codec_mask && !(probe_only[dev] & 1)) {
+ err = azx_codec_configure(chip);
+ if (err < 0)
+ goto out_free;
+@@ -2335,17 +2337,23 @@ static int azx_probe_continue(struct azx *chip)
+
+ set_default_power_save(chip);
+
+- if (azx_has_pm_runtime(chip))
++ if (azx_has_pm_runtime(chip)) {
++ pm_runtime_use_autosuspend(&pci->dev);
++ pm_runtime_allow(&pci->dev);
+ pm_runtime_put_autosuspend(&pci->dev);
++ }
+
+ out_free:
+- if (err < 0 || !hda->need_i915_power)
++ if (err < 0) {
++ azx_free(chip);
++ return err;
++ }
++
++ if (!hda->need_i915_power)
+ display_power(chip, false);
+- if (err < 0)
+- hda->init_failed = 1;
+ complete_all(&hda->probe_wait);
+ to_hda_bus(bus)->bus_probing = 0;
+- return err;
++ return 0;
+ }
+
+ static void azx_remove(struct pci_dev *pci)
+diff --git a/sound/pci/hda/hda_intel.h b/sound/pci/hda/hda_intel.h
+index 2acfff3da1a0..3fb119f09040 100644
+--- a/sound/pci/hda/hda_intel.h
++++ b/sound/pci/hda/hda_intel.h
+@@ -27,6 +27,7 @@ struct hda_intel {
+ unsigned int use_vga_switcheroo:1;
+ unsigned int vga_switcheroo_registered:1;
+ unsigned int init_failed:1; /* delayed init failed */
++ unsigned int freed:1; /* resources already released */
+
+ bool need_i915_power:1; /* the hda controller needs i915 power */
+ };
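
Editor's note: in the hda_intel hunks, azx_free() becomes idempotent via the new freed bit and the hda structure moves to devm_kzalloc(), so the error paths no longer free it by hand. The guard itself is the usual "already torn down" early return; a stand-alone sketch of that idiom with invented names:

#include <stdio.h>
#include <stdlib.h>

struct chip {
	char *dma_buf;
	unsigned int freed:1;          /* resources already released */
};

static void chip_free(struct chip *c)
{
	if (c->freed)                  /* second call becomes a no-op */
		return;

	free(c->dma_buf);
	c->dma_buf = NULL;
	c->freed = 1;
	printf("resources released\n");
}

int main(void)
{
	struct chip c = { .dma_buf = malloc(64) };

	chip_free(&c);   /* e.g. called from the probe error path */
	chip_free(&c);   /* and again from the device destructor: harmless */
	return 0;
}
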
+diff --git a/sound/soc/codecs/tas571x.c b/sound/soc/codecs/tas571x.c
+index 1554631cb397..5b7f9fcf6cbf 100644
+--- a/sound/soc/codecs/tas571x.c
++++ b/sound/soc/codecs/tas571x.c
+@@ -820,8 +820,10 @@ static int tas571x_i2c_probe(struct i2c_client *client,
+
+ priv->regmap = devm_regmap_init(dev, NULL, client,
+ priv->chip->regmap_config);
+- if (IS_ERR(priv->regmap))
+- return PTR_ERR(priv->regmap);
++ if (IS_ERR(priv->regmap)) {
++ ret = PTR_ERR(priv->regmap);
++ goto disable_regs;
++ }
+
+ priv->pdn_gpio = devm_gpiod_get_optional(dev, "pdn", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->pdn_gpio)) {
+@@ -845,7 +847,7 @@ static int tas571x_i2c_probe(struct i2c_client *client,
+
+ ret = regmap_write(priv->regmap, TAS571X_OSC_TRIM_REG, 0);
+ if (ret)
+- return ret;
++ goto disable_regs;
+
+ usleep_range(50000, 60000);
+
+@@ -861,12 +863,20 @@ static int tas571x_i2c_probe(struct i2c_client *client,
+ */
+ ret = regmap_update_bits(priv->regmap, TAS571X_MVOL_REG, 1, 0);
+ if (ret)
+- return ret;
++ goto disable_regs;
+ }
+
+- return devm_snd_soc_register_component(&client->dev,
++ ret = devm_snd_soc_register_component(&client->dev,
+ &priv->component_driver,
+ &tas571x_dai, 1);
++ if (ret)
++ goto disable_regs;
++
++ return ret;
++
++disable_regs:
++ regulator_bulk_disable(priv->chip->num_supply_names, priv->supplies);
++ return ret;
+ }
+
+ static int tas571x_i2c_remove(struct i2c_client *client)
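
Editor's note: the tas571x probe fix routes every late failure through goto disable_regs so the regulators enabled earlier in probe are always switched off again. That is the standard goto-cleanup ladder; a compact userspace analogue (the resource functions below are placeholders, not the driver's API):

#include <stdio.h>

/* Placeholder "resources" standing in for regulators/regmap/etc. */
static int enable_supplies(void) { puts("supplies on"); return 0; }
static void disable_supplies(void) { puts("supplies off"); }
static int init_regmap(void) { puts("regmap ok"); return 0; }
static int register_component(void) { return -1; /* simulate failure */ }

static int probe(void)
{
	int ret;

	ret = enable_supplies();
	if (ret)
		return ret;

	ret = init_regmap();
	if (ret)
		goto disable_regs;      /* undo what was already done */

	ret = register_component();
	if (ret)
		goto disable_regs;

	return 0;

disable_regs:
	disable_supplies();
	return ret;
}

int main(void)
{
	printf("probe() = %d\n", probe());
	return 0;
}
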
+diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
+index 55112c1bba5e..6cf0f6612bda 100644
+--- a/sound/soc/codecs/wm8960.c
++++ b/sound/soc/codecs/wm8960.c
+@@ -860,8 +860,7 @@ static int wm8960_hw_params(struct snd_pcm_substream *substream,
+
+ wm8960->is_stream_in_use[tx] = true;
+
+- if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_ON &&
+- !wm8960->is_stream_in_use[!tx])
++ if (!wm8960->is_stream_in_use[!tx])
+ return wm8960_configure_clocking(component);
+
+ return 0;
+diff --git a/sound/soc/meson/axg-card.c b/sound/soc/meson/axg-card.c
+index 1f698adde506..2b04ac3d8fd3 100644
+--- a/sound/soc/meson/axg-card.c
++++ b/sound/soc/meson/axg-card.c
+@@ -586,8 +586,10 @@ static int axg_card_add_link(struct snd_soc_card *card, struct device_node *np,
+
+ if (axg_card_cpu_is_tdm_iface(dai_link->cpus->of_node))
+ ret = axg_card_parse_tdm(card, np, index);
+- else if (axg_card_cpu_is_codec(dai_link->cpus->of_node))
++ else if (axg_card_cpu_is_codec(dai_link->cpus->of_node)) {
+ dai_link->params = &codec_params;
++ dai_link->no_pcm = 0; /* link is not a DPCM BE */
++ }
+
+ return ret;
+ }
+diff --git a/sound/soc/qcom/qdsp6/q6afe-dai.c b/sound/soc/qcom/qdsp6/q6afe-dai.c
+index c1a7624eaf17..2a5302f1db98 100644
+--- a/sound/soc/qcom/qdsp6/q6afe-dai.c
++++ b/sound/soc/qcom/qdsp6/q6afe-dai.c
+@@ -902,6 +902,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
++ .channels_min = 1,
++ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+@@ -917,6 +919,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
++ .channels_min = 1,
++ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+@@ -931,6 +935,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
++ .channels_min = 1,
++ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+@@ -946,6 +952,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
++ .channels_min = 1,
++ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+@@ -960,6 +968,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
++ .channels_min = 1,
++ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+@@ -975,6 +985,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
++ .channels_min = 1,
++ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+@@ -989,6 +1001,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
++ .channels_min = 1,
++ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+@@ -1004,6 +1018,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
++ .channels_min = 1,
++ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+diff --git a/sound/soc/samsung/s3c-i2s-v2.c b/sound/soc/samsung/s3c-i2s-v2.c
+index 593be1b668d6..b3e12d6a78a1 100644
+--- a/sound/soc/samsung/s3c-i2s-v2.c
++++ b/sound/soc/samsung/s3c-i2s-v2.c
+@@ -656,60 +656,6 @@ void s3c_i2sv2_cleanup(struct snd_soc_dai *dai,
+ }
+ EXPORT_SYMBOL_GPL(s3c_i2sv2_cleanup);
+
+-#ifdef CONFIG_PM
+-static int s3c2412_i2s_suspend(struct snd_soc_dai *dai)
+-{
+- struct s3c_i2sv2_info *i2s = to_info(dai);
+- u32 iismod;
+-
+- if (dai->active) {
+- i2s->suspend_iismod = readl(i2s->regs + S3C2412_IISMOD);
+- i2s->suspend_iiscon = readl(i2s->regs + S3C2412_IISCON);
+- i2s->suspend_iispsr = readl(i2s->regs + S3C2412_IISPSR);
+-
+- /* some basic suspend checks */
+-
+- iismod = readl(i2s->regs + S3C2412_IISMOD);
+-
+- if (iismod & S3C2412_IISCON_RXDMA_ACTIVE)
+- pr_warn("%s: RXDMA active?\n", __func__);
+-
+- if (iismod & S3C2412_IISCON_TXDMA_ACTIVE)
+- pr_warn("%s: TXDMA active?\n", __func__);
+-
+- if (iismod & S3C2412_IISCON_IIS_ACTIVE)
+- pr_warn("%s: IIS active\n", __func__);
+- }
+-
+- return 0;
+-}
+-
+-static int s3c2412_i2s_resume(struct snd_soc_dai *dai)
+-{
+- struct s3c_i2sv2_info *i2s = to_info(dai);
+-
+- pr_info("dai_active %d, IISMOD %08x, IISCON %08x\n",
+- dai->active, i2s->suspend_iismod, i2s->suspend_iiscon);
+-
+- if (dai->active) {
+- writel(i2s->suspend_iiscon, i2s->regs + S3C2412_IISCON);
+- writel(i2s->suspend_iismod, i2s->regs + S3C2412_IISMOD);
+- writel(i2s->suspend_iispsr, i2s->regs + S3C2412_IISPSR);
+-
+- writel(S3C2412_IISFIC_RXFLUSH | S3C2412_IISFIC_TXFLUSH,
+- i2s->regs + S3C2412_IISFIC);
+-
+- ndelay(250);
+- writel(0x0, i2s->regs + S3C2412_IISFIC);
+- }
+-
+- return 0;
+-}
+-#else
+-#define s3c2412_i2s_suspend NULL
+-#define s3c2412_i2s_resume NULL
+-#endif
+-
+ int s3c_i2sv2_register_component(struct device *dev, int id,
+ const struct snd_soc_component_driver *cmp_drv,
+ struct snd_soc_dai_driver *dai_drv)
+@@ -727,9 +673,6 @@ int s3c_i2sv2_register_component(struct device *dev, int id,
+ if (!ops->delay)
+ ops->delay = s3c2412_i2s_delay;
+
+- dai_drv->suspend = s3c2412_i2s_suspend;
+- dai_drv->resume = s3c2412_i2s_resume;
+-
+ return devm_snd_soc_register_component(dev, cmp_drv, dai_drv, 1);
+ }
+ EXPORT_SYMBOL_GPL(s3c_i2sv2_register_component);
+diff --git a/sound/soc/samsung/s3c2412-i2s.c b/sound/soc/samsung/s3c2412-i2s.c
+index 787a3f6e9f24..b35d828c1cfe 100644
+--- a/sound/soc/samsung/s3c2412-i2s.c
++++ b/sound/soc/samsung/s3c2412-i2s.c
+@@ -117,6 +117,60 @@ static int s3c2412_i2s_hw_params(struct snd_pcm_substream *substream,
+ return 0;
+ }
+
++#ifdef CONFIG_PM
++static int s3c2412_i2s_suspend(struct snd_soc_component *component)
++{
++ struct s3c_i2sv2_info *i2s = snd_soc_component_get_drvdata(component);
++ u32 iismod;
++
++ if (component->active) {
++ i2s->suspend_iismod = readl(i2s->regs + S3C2412_IISMOD);
++ i2s->suspend_iiscon = readl(i2s->regs + S3C2412_IISCON);
++ i2s->suspend_iispsr = readl(i2s->regs + S3C2412_IISPSR);
++
++ /* some basic suspend checks */
++
++ iismod = readl(i2s->regs + S3C2412_IISMOD);
++
++ if (iismod & S3C2412_IISCON_RXDMA_ACTIVE)
++ pr_warn("%s: RXDMA active?\n", __func__);
++
++ if (iismod & S3C2412_IISCON_TXDMA_ACTIVE)
++ pr_warn("%s: TXDMA active?\n", __func__);
++
++ if (iismod & S3C2412_IISCON_IIS_ACTIVE)
++ pr_warn("%s: IIS active\n", __func__);
++ }
++
++ return 0;
++}
++
++static int s3c2412_i2s_resume(struct snd_soc_component *component)
++{
++ struct s3c_i2sv2_info *i2s = snd_soc_component_get_drvdata(component);
++
++ pr_info("component_active %d, IISMOD %08x, IISCON %08x\n",
++ component->active, i2s->suspend_iismod, i2s->suspend_iiscon);
++
++ if (component->active) {
++ writel(i2s->suspend_iiscon, i2s->regs + S3C2412_IISCON);
++ writel(i2s->suspend_iismod, i2s->regs + S3C2412_IISMOD);
++ writel(i2s->suspend_iispsr, i2s->regs + S3C2412_IISPSR);
++
++ writel(S3C2412_IISFIC_RXFLUSH | S3C2412_IISFIC_TXFLUSH,
++ i2s->regs + S3C2412_IISFIC);
++
++ ndelay(250);
++ writel(0x0, i2s->regs + S3C2412_IISFIC);
++ }
++
++ return 0;
++}
++#else
++#define s3c2412_i2s_suspend NULL
++#define s3c2412_i2s_resume NULL
++#endif
++
+ #define S3C2412_I2S_RATES \
+ (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 | SNDRV_PCM_RATE_16000 | \
+ SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
+@@ -146,6 +200,8 @@ static struct snd_soc_dai_driver s3c2412_i2s_dai = {
+
+ static const struct snd_soc_component_driver s3c2412_i2s_component = {
+ .name = "s3c2412-i2s",
++ .suspend = s3c2412_i2s_suspend,
++ .resume = s3c2412_i2s_resume,
+ };
+
+ static int s3c2412_iis_dev_probe(struct platform_device *pdev)
+diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
+index 068d809c349a..b17366bac846 100644
+--- a/sound/soc/soc-core.c
++++ b/sound/soc/soc-core.c
+@@ -1256,8 +1256,18 @@ static int soc_probe_component(struct snd_soc_card *card,
+ ret = snd_soc_dapm_add_routes(dapm,
+ component->driver->dapm_routes,
+ component->driver->num_dapm_routes);
+- if (ret < 0)
+- goto err_probe;
++ if (ret < 0) {
++ if (card->disable_route_checks) {
++ dev_info(card->dev,
++ "%s: disable_route_checks set, ignoring errors on add_routes\n",
++ __func__);
++ } else {
++ dev_err(card->dev,
++ "%s: snd_soc_dapm_add_routes failed: %d\n",
++ __func__, ret);
++ goto err_probe;
++ }
++ }
+
+ /* see for_each_card_components */
+ list_add(&component->card_list, &card->component_dev_list);
+@@ -1938,8 +1948,18 @@ static int snd_soc_bind_card(struct snd_soc_card *card)
+
+ ret = snd_soc_dapm_add_routes(&card->dapm, card->dapm_routes,
+ card->num_dapm_routes);
+- if (ret < 0)
+- goto probe_end;
++ if (ret < 0) {
++ if (card->disable_route_checks) {
++ dev_info(card->dev,
++ "%s: disable_route_checks set, ignoring errors on add_routes\n",
++ __func__);
++ } else {
++ dev_err(card->dev,
++ "%s: snd_soc_dapm_add_routes failed: %d\n",
++ __func__, ret);
++ goto probe_end;
++ }
++ }
+
+ ret = snd_soc_dapm_add_routes(&card->dapm, card->of_dapm_routes,
+ card->num_of_dapm_routes);
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index 8f6f0ad50288..10e2305bb885 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -2890,22 +2890,19 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
+ capture = rtd->dai_link->dpcm_capture;
+ } else {
+ /* Adapt stream for codec2codec links */
+- struct snd_soc_pcm_stream *cpu_capture = rtd->dai_link->params ?
+- &cpu_dai->driver->playback : &cpu_dai->driver->capture;
+- struct snd_soc_pcm_stream *cpu_playback = rtd->dai_link->params ?
+- &cpu_dai->driver->capture : &cpu_dai->driver->playback;
++ int cpu_capture = rtd->dai_link->params ?
++ SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE;
++ int cpu_playback = rtd->dai_link->params ?
++ SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
+
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
+ if (snd_soc_dai_stream_valid(codec_dai, SNDRV_PCM_STREAM_PLAYBACK) &&
+- snd_soc_dai_stream_valid(cpu_dai, SNDRV_PCM_STREAM_CAPTURE))
++ snd_soc_dai_stream_valid(cpu_dai, cpu_playback))
+ playback = 1;
+ if (snd_soc_dai_stream_valid(codec_dai, SNDRV_PCM_STREAM_CAPTURE) &&
+- snd_soc_dai_stream_valid(cpu_dai, SNDRV_PCM_STREAM_PLAYBACK))
++ snd_soc_dai_stream_valid(cpu_dai, cpu_capture))
+ capture = 1;
+ }
+-
+- capture = capture && cpu_capture->channels_min;
+- playback = playback && cpu_playback->channels_min;
+ }
+
+ if (rtd->dai_link->playback_only) {
+diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c
+index d3259de43712..7e965848796c 100644
+--- a/sound/soc/stm/stm32_sai_sub.c
++++ b/sound/soc/stm/stm32_sai_sub.c
+@@ -1543,6 +1543,9 @@ static int stm32_sai_sub_probe(struct platform_device *pdev)
+ return ret;
+ }
+
++ if (STM_SAI_PROTOCOL_IS_SPDIF(sai))
++ conf = &stm32_sai_pcm_config_spdif;
++
+ ret = snd_dmaengine_pcm_register(&pdev->dev, conf, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not register pcm dma\n");
+@@ -1551,15 +1554,10 @@ static int stm32_sai_sub_probe(struct platform_device *pdev)
+
+ ret = snd_soc_register_component(&pdev->dev, &stm32_component,
+ &sai->cpu_dai_drv, 1);
+- if (ret) {
++ if (ret)
+ snd_dmaengine_pcm_unregister(&pdev->dev);
+- return ret;
+- }
+-
+- if (STM_SAI_PROTOCOL_IS_SPDIF(sai))
+- conf = &stm32_sai_pcm_config_spdif;
+
+- return 0;
++ return ret;
+ }
+
+ static int stm32_sai_sub_remove(struct platform_device *pdev)
+diff --git a/sound/soc/stm/stm32_spdifrx.c b/sound/soc/stm/stm32_spdifrx.c
+index 3769d9ce5dbe..e6e75897cce8 100644
+--- a/sound/soc/stm/stm32_spdifrx.c
++++ b/sound/soc/stm/stm32_spdifrx.c
+@@ -1009,6 +1009,8 @@ static int stm32_spdifrx_probe(struct platform_device *pdev)
+
+ if (idr == SPDIFRX_IPIDR_NUMBER) {
+ ret = regmap_read(spdifrx->regmap, STM32_SPDIFRX_VERR, &ver);
++ if (ret)
++ goto error;
+
+ dev_dbg(&pdev->dev, "SPDIFRX version: %lu.%lu registered\n",
+ FIELD_GET(SPDIFRX_VERR_MAJ_MASK, ver),
+diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c
+index c364e4be5e6e..c1a7fc185940 100644
+--- a/tools/lib/bpf/netlink.c
++++ b/tools/lib/bpf/netlink.c
+@@ -141,7 +141,7 @@ int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags)
+ struct ifinfomsg ifinfo;
+ char attrbuf[64];
+ } req;
+- __u32 nl_pid;
++ __u32 nl_pid = 0;
+
+ sock = libbpf_netlink_open(&nl_pid);
+ if (sock < 0)
+@@ -256,7 +256,7 @@ int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
+ {
+ struct xdp_id_md xdp_id = {};
+ int sock, ret;
+- __u32 nl_pid;
++ __u32 nl_pid = 0;
+ __u32 mask;
+
+ if (flags & ~XDP_FLAGS_MASK || !info_size)
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index 2b765bbbef92..95c485d3d4d8 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -2307,14 +2307,27 @@ static bool ignore_unreachable_insn(struct instruction *insn)
+ !strcmp(insn->sec->name, ".altinstr_aux"))
+ return true;
+
++ if (!insn->func)
++ return false;
++
++ /*
++ * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
++ * __builtin_unreachable(). The BUG() macro has an unreachable() after
++ * the UD2, which causes GCC's undefined trap logic to emit another UD2
++ * (or occasionally a JMP to UD2).
++ */
++ if (list_prev_entry(insn, list)->dead_end &&
++ (insn->type == INSN_BUG ||
++ (insn->type == INSN_JUMP_UNCONDITIONAL &&
++ insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
++ return true;
++
+ /*
+ * Check if this (or a subsequent) instruction is related to
+ * CONFIG_UBSAN or CONFIG_KASAN.
+ *
+ * End the search at 5 instructions to avoid going into the weeds.
+ */
+- if (!insn->func)
+- return false;
+ for (i = 0; i < 5; i++) {
+
+ if (is_kasan_insn(insn) || is_ubsan_insn(insn))
+diff --git a/tools/objtool/orc_dump.c b/tools/objtool/orc_dump.c
+index 13ccf775a83a..ba4cbb1cdd63 100644
+--- a/tools/objtool/orc_dump.c
++++ b/tools/objtool/orc_dump.c
+@@ -66,7 +66,7 @@ int orc_dump(const char *_objname)
+ char *name;
+ size_t nr_sections;
+ Elf64_Addr orc_ip_addr = 0;
+- size_t shstrtab_idx;
++ size_t shstrtab_idx, strtab_idx = 0;
+ Elf *elf;
+ Elf_Scn *scn;
+ GElf_Shdr sh;
+@@ -127,6 +127,8 @@ int orc_dump(const char *_objname)
+
+ if (!strcmp(name, ".symtab")) {
+ symtab = data;
++ } else if (!strcmp(name, ".strtab")) {
++ strtab_idx = i;
+ } else if (!strcmp(name, ".orc_unwind")) {
+ orc = data->d_buf;
+ orc_size = sh.sh_size;
+@@ -138,7 +140,7 @@ int orc_dump(const char *_objname)
+ }
+ }
+
+- if (!symtab || !orc || !orc_ip)
++ if (!symtab || !strtab_idx || !orc || !orc_ip)
+ return 0;
+
+ if (orc_size % sizeof(*orc) != 0) {
+@@ -159,21 +161,29 @@ int orc_dump(const char *_objname)
+ return -1;
+ }
+
+- scn = elf_getscn(elf, sym.st_shndx);
+- if (!scn) {
+- WARN_ELF("elf_getscn");
+- return -1;
+- }
+-
+- if (!gelf_getshdr(scn, &sh)) {
+- WARN_ELF("gelf_getshdr");
+- return -1;
+- }
+-
+- name = elf_strptr(elf, shstrtab_idx, sh.sh_name);
+- if (!name || !*name) {
+- WARN_ELF("elf_strptr");
+- return -1;
++ if (GELF_ST_TYPE(sym.st_info) == STT_SECTION) {
++ scn = elf_getscn(elf, sym.st_shndx);
++ if (!scn) {
++ WARN_ELF("elf_getscn");
++ return -1;
++ }
++
++ if (!gelf_getshdr(scn, &sh)) {
++ WARN_ELF("gelf_getshdr");
++ return -1;
++ }
++
++ name = elf_strptr(elf, shstrtab_idx, sh.sh_name);
++ if (!name) {
++ WARN_ELF("elf_strptr");
++ return -1;
++ }
++ } else {
++ name = elf_strptr(elf, strtab_idx, sym.st_name);
++ if (!name) {
++ WARN_ELF("elf_strptr");
++ return -1;
++ }
+ }
+
+ printf("%s+%llx:", name, (unsigned long long)rela.r_addend);
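
Editor's note: the orc_dump fix remembers the index of .strtab while walking the section headers, so that names of ordinary (non-STT_SECTION) symbols can later be resolved with elf_strptr() against the symbol string table rather than .shstrtab. A minimal libelf sketch of locating the two string tables (build with -lelf; error handling trimmed to the essentials):

#include <fcntl.h>
#include <gelf.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	if (argc < 2 || elf_version(EV_CURRENT) == EV_NONE)
		return 1;

	int fd = open(argv[1], O_RDONLY);
	Elf *elf = elf_begin(fd, ELF_C_READ, NULL);
	if (!elf)
		return 1;

	size_t shstrtab_idx = 0, strtab_idx = 0;
	elf_getshdrstrndx(elf, &shstrtab_idx);        /* index of .shstrtab */

	Elf_Scn *scn = NULL;
	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		GElf_Shdr sh;
		if (!gelf_getshdr(scn, &sh))
			continue;
		const char *name = elf_strptr(elf, shstrtab_idx, sh.sh_name);
		if (name && !strcmp(name, ".strtab"))
			strtab_idx = elf_ndxscn(scn); /* symbol names live here */
	}

	printf(".shstrtab index: %zu, .strtab index: %zu\n",
	       shstrtab_idx, strtab_idx);

	elf_end(elf);
	close(fd);
	return 0;
}
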
+diff --git a/tools/testing/selftests/bpf/progs/test_btf_haskv.c b/tools/testing/selftests/bpf/progs/test_btf_haskv.c
+index 88b0566da13d..31538c9ed193 100644
+--- a/tools/testing/selftests/bpf/progs/test_btf_haskv.c
++++ b/tools/testing/selftests/bpf/progs/test_btf_haskv.c
+@@ -20,20 +20,12 @@ struct bpf_map_def SEC("maps") btf_map = {
+
+ BPF_ANNOTATE_KV_PAIR(btf_map, int, struct ipv_counts);
+
+-struct dummy_tracepoint_args {
+- unsigned long long pad;
+- struct sock *sock;
+-};
+-
+ __attribute__((noinline))
+-int test_long_fname_2(struct dummy_tracepoint_args *arg)
++int test_long_fname_2(void)
+ {
+ struct ipv_counts *counts;
+ int key = 0;
+
+- if (!arg->sock)
+- return 0;
+-
+ counts = bpf_map_lookup_elem(&btf_map, &key);
+ if (!counts)
+ return 0;
+@@ -44,15 +36,15 @@ int test_long_fname_2(struct dummy_tracepoint_args *arg)
+ }
+
+ __attribute__((noinline))
+-int test_long_fname_1(struct dummy_tracepoint_args *arg)
++int test_long_fname_1(void)
+ {
+- return test_long_fname_2(arg);
++ return test_long_fname_2();
+ }
+
+ SEC("dummy_tracepoint")
+-int _dummy_tracepoint(struct dummy_tracepoint_args *arg)
++int _dummy_tracepoint(void *arg)
+ {
+- return test_long_fname_1(arg);
++ return test_long_fname_1();
+ }
+
+ char _license[] SEC("license") = "GPL";
+diff --git a/tools/testing/selftests/bpf/progs/test_btf_newkv.c b/tools/testing/selftests/bpf/progs/test_btf_newkv.c
+index a924e53c8e9d..6c5560162746 100644
+--- a/tools/testing/selftests/bpf/progs/test_btf_newkv.c
++++ b/tools/testing/selftests/bpf/progs/test_btf_newkv.c
+@@ -28,20 +28,12 @@ struct {
+ __type(value, struct ipv_counts);
+ } btf_map SEC(".maps");
+
+-struct dummy_tracepoint_args {
+- unsigned long long pad;
+- struct sock *sock;
+-};
+-
+ __attribute__((noinline))
+-int test_long_fname_2(struct dummy_tracepoint_args *arg)
++int test_long_fname_2(void)
+ {
+ struct ipv_counts *counts;
+ int key = 0;
+
+- if (!arg->sock)
+- return 0;
+-
+ counts = bpf_map_lookup_elem(&btf_map, &key);
+ if (!counts)
+ return 0;
+@@ -57,15 +49,15 @@ int test_long_fname_2(struct dummy_tracepoint_args *arg)
+ }
+
+ __attribute__((noinline))
+-int test_long_fname_1(struct dummy_tracepoint_args *arg)
++int test_long_fname_1(void)
+ {
+- return test_long_fname_2(arg);
++ return test_long_fname_2();
+ }
+
+ SEC("dummy_tracepoint")
+-int _dummy_tracepoint(struct dummy_tracepoint_args *arg)
++int _dummy_tracepoint(void *arg)
+ {
+- return test_long_fname_1(arg);
++ return test_long_fname_1();
+ }
+
+ char _license[] SEC("license") = "GPL";
+diff --git a/tools/testing/selftests/bpf/progs/test_btf_nokv.c b/tools/testing/selftests/bpf/progs/test_btf_nokv.c
+index 983aedd1c072..506da7fd2da2 100644
+--- a/tools/testing/selftests/bpf/progs/test_btf_nokv.c
++++ b/tools/testing/selftests/bpf/progs/test_btf_nokv.c
+@@ -17,20 +17,12 @@ struct bpf_map_def SEC("maps") btf_map = {
+ .max_entries = 4,
+ };
+
+-struct dummy_tracepoint_args {
+- unsigned long long pad;
+- struct sock *sock;
+-};
+-
+ __attribute__((noinline))
+-int test_long_fname_2(struct dummy_tracepoint_args *arg)
++int test_long_fname_2(void)
+ {
+ struct ipv_counts *counts;
+ int key = 0;
+
+- if (!arg->sock)
+- return 0;
+-
+ counts = bpf_map_lookup_elem(&btf_map, &key);
+ if (!counts)
+ return 0;
+@@ -41,15 +33,15 @@ int test_long_fname_2(struct dummy_tracepoint_args *arg)
+ }
+
+ __attribute__((noinline))
+-int test_long_fname_1(struct dummy_tracepoint_args *arg)
++int test_long_fname_1(void)
+ {
+- return test_long_fname_2(arg);
++ return test_long_fname_2();
+ }
+
+ SEC("dummy_tracepoint")
+-int _dummy_tracepoint(struct dummy_tracepoint_args *arg)
++int _dummy_tracepoint(void *arg)
+ {
+- return test_long_fname_1(arg);
++ return test_long_fname_1();
+ }
+
+ char _license[] SEC("license") = "GPL";
+diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c
+index 8da77cda5f4a..305fae8f80a9 100644
+--- a/tools/testing/selftests/bpf/test_btf.c
++++ b/tools/testing/selftests/bpf/test_btf.c
+@@ -2854,7 +2854,7 @@ static struct btf_raw_test raw_tests[] = {
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+- .err_str = "vlen != 0",
++ .err_str = "Invalid func linkage",
+ },
+
+ {
+diff --git a/tools/testing/selftests/bpf/verifier/value_illegal_alu.c b/tools/testing/selftests/bpf/verifier/value_illegal_alu.c
+index 7f6c232cd842..ed1c2cea1dea 100644
+--- a/tools/testing/selftests/bpf/verifier/value_illegal_alu.c
++++ b/tools/testing/selftests/bpf/verifier/value_illegal_alu.c
+@@ -88,6 +88,7 @@
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
++ .errstr_unpriv = "leaking pointer from stack off -8",
+ .errstr = "R0 invalid mem access 'inv'",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,