From 9dca87bbb73f85f2bea7d939047158ff44930ace Mon Sep 17 00:00:00 2001 From: Mike Pagano Date: Wed, 2 Mar 2022 08:04:21 -0500 Subject: Linux patch 5.16.12 Signed-off-by: Mike Pagano --- 0000_README | 4 + 1011_linux-5.16.12.patch | 6431 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 6435 insertions(+) create mode 100644 1011_linux-5.16.12.patch diff --git a/0000_README b/0000_README index 7706410d..0785204e 100644 --- a/0000_README +++ b/0000_README @@ -87,6 +87,10 @@ Patch: 1010_linux-5.16.11.patch From: http://www.kernel.org Desc: Linux 5.16.11 +Patch: 1011_linux-5.16.12.patch +From: http://www.kernel.org +Desc: Linux 5.16.12 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1011_linux-5.16.12.patch b/1011_linux-5.16.12.patch new file mode 100644 index 00000000..3b6bed0f --- /dev/null +++ b/1011_linux-5.16.12.patch @@ -0,0 +1,6431 @@ +diff --git a/Makefile b/Makefile +index 00ba75768af73..09a9bb824afad 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 16 +-SUBLEVEL = 11 ++SUBLEVEL = 12 + EXTRAVERSION = + NAME = Gobble Gobble + +diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c +index 237d20dd5622d..286cec4d86d7b 100644 +--- a/arch/parisc/kernel/unaligned.c ++++ b/arch/parisc/kernel/unaligned.c +@@ -340,7 +340,7 @@ static int emulate_stw(struct pt_regs *regs, int frreg, int flop) + : "r" (val), "r" (regs->ior), "r" (regs->isr) + : "r19", "r20", "r21", "r22", "r1", FIXUP_BRANCH_CLOBBER ); + +- return 0; ++ return ret; + } + static int emulate_std(struct pt_regs *regs, int frreg, int flop) + { +@@ -397,7 +397,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop) + __asm__ __volatile__ ( + " mtsp %4, %%sr1\n" + " zdep %2, 29, 2, %%r19\n" +-" dep %%r0, 31, 2, %2\n" ++" dep %%r0, 31, 2, %3\n" + " mtsar %%r19\n" + " zvdepi -2, 32, %%r19\n" + "1: ldw 0(%%sr1,%3),%%r20\n" +@@ -409,7 +409,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop) + " andcm %%r21, %%r19, %%r21\n" + " or %1, %%r20, %1\n" + " or %2, %%r21, %2\n" +-"3: stw %1,0(%%sr1,%1)\n" ++"3: stw %1,0(%%sr1,%3)\n" + "4: stw %%r1,4(%%sr1,%3)\n" + "5: stw %2,8(%%sr1,%3)\n" + " copy %%r0, %0\n" +@@ -596,7 +596,6 @@ void handle_unaligned(struct pt_regs *regs) + ret = ERR_NOTHANDLED; /* "undefined", but lets kill them. 
*/
+ break;
+ }
+-#ifdef CONFIG_PA20
+ switch (regs->iir & OPCODE2_MASK)
+ {
+ case OPCODE_FLDD_L:
+@@ -607,22 +606,23 @@ void handle_unaligned(struct pt_regs *regs)
+ flop=1;
+ ret = emulate_std(regs, R2(regs->iir),1);
+ break;
++#ifdef CONFIG_PA20
+ case OPCODE_LDD_L:
+ ret = emulate_ldd(regs, R2(regs->iir),0);
+ break;
+ case OPCODE_STD_L:
+ ret = emulate_std(regs, R2(regs->iir),0);
+ break;
+- }
+ #endif
++ }
+ switch (regs->iir & OPCODE3_MASK)
+ {
+ case OPCODE_FLDW_L:
+ flop=1;
+- ret = emulate_ldw(regs, R2(regs->iir),0);
++ ret = emulate_ldw(regs, R2(regs->iir), 1);
+ break;
+ case OPCODE_LDW_M:
+- ret = emulate_ldw(regs, R2(regs->iir),1);
++ ret = emulate_ldw(regs, R2(regs->iir), 0);
+ break;
+
+ case OPCODE_FSTW_L:
+diff --git a/arch/riscv/configs/nommu_k210_sdcard_defconfig b/arch/riscv/configs/nommu_k210_sdcard_defconfig
+index d68b743d580f8..15d1fd0a70184 100644
+--- a/arch/riscv/configs/nommu_k210_sdcard_defconfig
++++ b/arch/riscv/configs/nommu_k210_sdcard_defconfig
+@@ -23,7 +23,7 @@ CONFIG_SLOB=y
+ CONFIG_SOC_CANAAN=y
+ CONFIG_SMP=y
+ CONFIG_NR_CPUS=2
+-CONFIG_CMDLINE="earlycon console=ttySIF0 rootdelay=2 root=/dev/mmcblk0p1 ro"
++CONFIG_CMDLINE="earlycon console=ttySIF0 root=/dev/mmcblk0p1 rootwait ro"
+ CONFIG_CMDLINE_FORCE=y
+ # CONFIG_SECCOMP is not set
+ # CONFIG_STACKPROTECTOR is not set
+diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
+index 3397ddac1a30c..16308ef1e5787 100644
+--- a/arch/riscv/kernel/Makefile
++++ b/arch/riscv/kernel/Makefile
+@@ -50,6 +50,8 @@ obj-$(CONFIG_MODULE_SECTIONS) += module-sections.o
+ obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
+ obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o
+
++obj-$(CONFIG_TRACE_IRQFLAGS) += trace_irq.o
++
+ obj-$(CONFIG_RISCV_BASE_PMU) += perf_event.o
+ obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o
+ obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o
+diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
+index ed29e9c8f660c..d6a46ed0bf051 100644
+--- a/arch/riscv/kernel/entry.S
++++ b/arch/riscv/kernel/entry.S
+@@ -108,7 +108,7 @@ _save_context:
+ .option pop
+
+ #ifdef CONFIG_TRACE_IRQFLAGS
+- call trace_hardirqs_off
++ call __trace_hardirqs_off
+ #endif
+
+ #ifdef CONFIG_CONTEXT_TRACKING
+@@ -143,7 +143,7 @@ skip_context_tracking:
+ li t0, EXC_BREAKPOINT
+ beq s4, t0, 1f
+ #ifdef CONFIG_TRACE_IRQFLAGS
+- call trace_hardirqs_on
++ call __trace_hardirqs_on
+ #endif
+ csrs CSR_STATUS, SR_IE
+
+@@ -234,7 +234,7 @@ ret_from_exception:
+ REG_L s0, PT_STATUS(sp)
+ csrc CSR_STATUS, SR_IE
+ #ifdef CONFIG_TRACE_IRQFLAGS
+- call trace_hardirqs_off
++ call __trace_hardirqs_off
+ #endif
+ #ifdef CONFIG_RISCV_M_MODE
+ /* the MPP value is too large to be used as an immediate arg for addi */
+@@ -270,10 +270,10 @@ restore_all:
+ REG_L s1, PT_STATUS(sp)
+ andi t0, s1, SR_PIE
+ beqz t0, 1f
+- call trace_hardirqs_on
++ call __trace_hardirqs_on
+ j 2f
+ 1:
+- call trace_hardirqs_off
++ call __trace_hardirqs_off
+ 2:
+ #endif
+ REG_L a0, PT_STATUS(sp)
+diff --git a/arch/riscv/kernel/trace_irq.c b/arch/riscv/kernel/trace_irq.c
+new file mode 100644
+index 0000000000000..095ac976d7da1
+--- /dev/null
++++ b/arch/riscv/kernel/trace_irq.c
+@@ -0,0 +1,27 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright (C) 2022 Changbin Du
++ */
++
++#include <linux/irqflags.h>
++#include <linux/kprobes.h>
++#include "trace_irq.h"
++
++/*
++ * trace_hardirqs_on/off require the caller to set up the frame pointer properly.
++ * Otherwise, CALLER_ADDR1 might trigger a paging exception in kernel.
++ * Here we add one extra level so they can be safely called by low
++ * level entry code where $fp is used for other purposes.
++ */
++
++void __trace_hardirqs_on(void)
++{
++ trace_hardirqs_on();
++}
++NOKPROBE_SYMBOL(__trace_hardirqs_on);
++
++void __trace_hardirqs_off(void)
++{
++ trace_hardirqs_off();
++}
++NOKPROBE_SYMBOL(__trace_hardirqs_off);
+diff --git a/arch/riscv/kernel/trace_irq.h b/arch/riscv/kernel/trace_irq.h
+new file mode 100644
+index 0000000000000..99fe67377e5ed
+--- /dev/null
++++ b/arch/riscv/kernel/trace_irq.h
+@@ -0,0 +1,11 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Copyright (C) 2022 Changbin Du
++ */
++#ifndef __TRACE_IRQ_H
++#define __TRACE_IRQ_H
++
++void __trace_hardirqs_on(void);
++void __trace_hardirqs_off(void);
++
++#endif /* __TRACE_IRQ_H */
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index fcdf3f8bb59a6..84e23b9864f4c 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -3905,12 +3905,23 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
+ walk_shadow_page_lockless_end(vcpu);
+ }
+
++static u32 alloc_apf_token(struct kvm_vcpu *vcpu)
++{
++ /* make sure the token value is not 0 */
++ u32 id = vcpu->arch.apf.id;
++
++ if (id << 12 == 0)
++ vcpu->arch.apf.id = 1;
++
++ return (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
++}
++
+ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+ gfn_t gfn)
+ {
+ struct kvm_arch_async_pf arch;
+
+- arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
++ arch.token = alloc_apf_token(vcpu);
+ arch.gfn = gfn;
+ arch.direct_map = vcpu->arch.mmu->direct_map;
+ arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 57e2a55e46175..9875c4cc3c768 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -2903,8 +2903,23 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+ u64 data = msr->data;
+ switch (ecx) {
+ case MSR_AMD64_TSC_RATIO:
+- if (!msr->host_initiated && !svm->tsc_scaling_enabled)
+- return 1;
++
++ if (!svm->tsc_scaling_enabled) {
++
++ if (!msr->host_initiated)
++ return 1;
++ /*
++ * In case TSC scaling is not enabled, always
++ * leave this MSR at the default value.
++ *
++ * Due to a bug in qemu 6.2.0, it would try to set
++ * this msr to 0 if tsc scaling is not enabled.
++ * Ignore this value as well.
++ */
++ if (data != 0 && data != svm->tsc_ratio_msr)
++ return 1;
++ break;
++ }
+
+ if (data & TSC_RATIO_RSVD)
+ return 1;
+diff --git a/block/fops.c b/block/fops.c
+index 0da147edbd186..77a5579d8de66 100644
+--- a/block/fops.c
++++ b/block/fops.c
+@@ -289,6 +289,8 @@ static void blkdev_bio_end_io_async(struct bio *bio)
+ struct kiocb *iocb = dio->iocb;
+ ssize_t ret;
+
++ WRITE_ONCE(iocb->private, NULL);
++
+ if (likely(!bio->bi_status)) {
+ ret = dio->size;
+ iocb->ki_pos += ret;
+diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
+index f242157bc81bb..ae8375e9d2681 100644
+--- a/drivers/ata/pata_hpt37x.c
++++ b/drivers/ata/pata_hpt37x.c
+@@ -919,6 +919,20 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+ irqmask &= ~0x10;
+ pci_write_config_byte(dev, 0x5a, irqmask);
+
++ /*
++ * HPT371 chips physically have only one channel, the secondary one,
++ * but the primary channel registers do exist! Go figure...
++ * So, we manually disable the non-existing channel here
++ * (if the BIOS hasn't done this already). 
++ */ ++ if (dev->device == PCI_DEVICE_ID_TTI_HPT371) { ++ u8 mcr1; ++ ++ pci_read_config_byte(dev, 0x50, &mcr1); ++ mcr1 &= ~0x04; ++ pci_write_config_byte(dev, 0x50, mcr1); ++ } ++ + /* + * default to pci clock. make sure MA15/16 are set to output + * to prevent drives having problems with 40-pin cables. Needed +diff --git a/drivers/base/dd.c b/drivers/base/dd.c +index 68ea1f949daa9..6b66306932016 100644 +--- a/drivers/base/dd.c ++++ b/drivers/base/dd.c +@@ -629,6 +629,9 @@ re_probe: + drv->remove(dev); + + devres_release_all(dev); ++ arch_teardown_dma_ops(dev); ++ kfree(dev->dma_range_map); ++ dev->dma_range_map = NULL; + driver_sysfs_remove(dev); + dev->driver = NULL; + dev_set_drvdata(dev, NULL); +@@ -1208,6 +1211,8 @@ static void __device_release_driver(struct device *dev, struct device *parent) + + devres_release_all(dev); + arch_teardown_dma_ops(dev); ++ kfree(dev->dma_range_map); ++ dev->dma_range_map = NULL; + dev->driver = NULL; + dev_set_drvdata(dev, NULL); + if (dev->pm_domain && dev->pm_domain->dismiss) +diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c +index d2656581a6085..4a446259a184e 100644 +--- a/drivers/base/regmap/regmap-irq.c ++++ b/drivers/base/regmap/regmap-irq.c +@@ -189,11 +189,9 @@ static void regmap_irq_sync_unlock(struct irq_data *data) + ret = regmap_write(map, reg, d->mask_buf[i]); + if (d->chip->clear_ack) { + if (d->chip->ack_invert && !ret) +- ret = regmap_write(map, reg, +- d->mask_buf[i]); ++ ret = regmap_write(map, reg, UINT_MAX); + else if (!ret) +- ret = regmap_write(map, reg, +- ~d->mask_buf[i]); ++ ret = regmap_write(map, reg, 0); + } + if (ret != 0) + dev_err(d->map->dev, "Failed to ack 0x%x: %d\n", +@@ -556,11 +554,9 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) + data->status_buf[i]); + if (chip->clear_ack) { + if (chip->ack_invert && !ret) +- ret = regmap_write(map, reg, +- data->status_buf[i]); ++ ret = regmap_write(map, reg, UINT_MAX); + else if (!ret) +- ret = regmap_write(map, reg, +- ~data->status_buf[i]); ++ ret = regmap_write(map, reg, 0); + } + if (ret != 0) + dev_err(map->dev, "Failed to ack 0x%x: %d\n", +@@ -817,13 +813,9 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, + d->status_buf[i] & d->mask_buf[i]); + if (chip->clear_ack) { + if (chip->ack_invert && !ret) +- ret = regmap_write(map, reg, +- (d->status_buf[i] & +- d->mask_buf[i])); ++ ret = regmap_write(map, reg, UINT_MAX); + else if (!ret) +- ret = regmap_write(map, reg, +- ~(d->status_buf[i] & +- d->mask_buf[i])); ++ ret = regmap_write(map, reg, 0); + } + if (ret != 0) { + dev_err(map->dev, "Failed to ack 0x%x: %d\n", +diff --git a/drivers/clk/ingenic/jz4725b-cgu.c b/drivers/clk/ingenic/jz4725b-cgu.c +index 744d136b721bc..15d61793f53b1 100644 +--- a/drivers/clk/ingenic/jz4725b-cgu.c ++++ b/drivers/clk/ingenic/jz4725b-cgu.c +@@ -139,11 +139,10 @@ static const struct ingenic_cgu_clk_info jz4725b_cgu_clocks[] = { + }, + + [JZ4725B_CLK_I2S] = { +- "i2s", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE, ++ "i2s", CGU_CLK_MUX | CGU_CLK_DIV, + .parents = { JZ4725B_CLK_EXT, JZ4725B_CLK_PLL_HALF, -1, -1 }, + .mux = { CGU_REG_CPCCR, 31, 1 }, + .div = { CGU_REG_I2SCDR, 0, 1, 9, -1, -1, -1 }, +- .gate = { CGU_REG_CLKGR, 6 }, + }, + + [JZ4725B_CLK_SPI] = { +diff --git a/drivers/clk/qcom/gcc-msm8994.c b/drivers/clk/qcom/gcc-msm8994.c +index 702a9bdc05598..5df9f1ead48e0 100644 +--- a/drivers/clk/qcom/gcc-msm8994.c ++++ b/drivers/clk/qcom/gcc-msm8994.c +@@ -107,42 +107,6 @@ static const struct clk_parent_data gcc_xo_gpll0_gpll4[] = { + { .hw 
= &gpll4.clkr.hw }, + }; + +-static struct clk_rcg2 system_noc_clk_src = { +- .cmd_rcgr = 0x0120, +- .hid_width = 5, +- .parent_map = gcc_xo_gpll0_map, +- .clkr.hw.init = &(struct clk_init_data){ +- .name = "system_noc_clk_src", +- .parent_data = gcc_xo_gpll0, +- .num_parents = ARRAY_SIZE(gcc_xo_gpll0), +- .ops = &clk_rcg2_ops, +- }, +-}; +- +-static struct clk_rcg2 config_noc_clk_src = { +- .cmd_rcgr = 0x0150, +- .hid_width = 5, +- .parent_map = gcc_xo_gpll0_map, +- .clkr.hw.init = &(struct clk_init_data){ +- .name = "config_noc_clk_src", +- .parent_data = gcc_xo_gpll0, +- .num_parents = ARRAY_SIZE(gcc_xo_gpll0), +- .ops = &clk_rcg2_ops, +- }, +-}; +- +-static struct clk_rcg2 periph_noc_clk_src = { +- .cmd_rcgr = 0x0190, +- .hid_width = 5, +- .parent_map = gcc_xo_gpll0_map, +- .clkr.hw.init = &(struct clk_init_data){ +- .name = "periph_noc_clk_src", +- .parent_data = gcc_xo_gpll0, +- .num_parents = ARRAY_SIZE(gcc_xo_gpll0), +- .ops = &clk_rcg2_ops, +- }, +-}; +- + static struct freq_tbl ftbl_ufs_axi_clk_src[] = { + F(50000000, P_GPLL0, 12, 0, 0), + F(100000000, P_GPLL0, 6, 0, 0), +@@ -1149,8 +1113,6 @@ static struct clk_branch gcc_blsp1_ahb_clk = { + .enable_mask = BIT(17), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_ahb_clk", +- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw }, +- .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +@@ -1434,8 +1396,6 @@ static struct clk_branch gcc_blsp2_ahb_clk = { + .enable_mask = BIT(15), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp2_ahb_clk", +- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw }, +- .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +@@ -1763,8 +1723,6 @@ static struct clk_branch gcc_lpass_q6_axi_clk = { + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_lpass_q6_axi_clk", +- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw }, +- .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +@@ -1777,8 +1735,6 @@ static struct clk_branch gcc_mss_q6_bimc_axi_clk = { + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mss_q6_bimc_axi_clk", +- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw }, +- .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +@@ -1806,9 +1762,6 @@ static struct clk_branch gcc_pcie_0_cfg_ahb_clk = { + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_cfg_ahb_clk", +- .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw }, +- .num_parents = 1, +- .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +@@ -1821,9 +1774,6 @@ static struct clk_branch gcc_pcie_0_mstr_axi_clk = { + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_mstr_axi_clk", +- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw }, +- .num_parents = 1, +- .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +@@ -1853,9 +1803,6 @@ static struct clk_branch gcc_pcie_0_slv_axi_clk = { + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_slv_axi_clk", +- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw }, +- .num_parents = 1, +- .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +@@ -1883,9 +1830,6 @@ static struct clk_branch gcc_pcie_1_cfg_ahb_clk = { + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_1_cfg_ahb_clk", +- .parent_hws = (const struct clk_hw *[]){ 
&config_noc_clk_src.clkr.hw }, +- .num_parents = 1, +- .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +@@ -1898,9 +1842,6 @@ static struct clk_branch gcc_pcie_1_mstr_axi_clk = { + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_1_mstr_axi_clk", +- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw }, +- .num_parents = 1, +- .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +@@ -1929,9 +1870,6 @@ static struct clk_branch gcc_pcie_1_slv_axi_clk = { + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_1_slv_axi_clk", +- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw }, +- .num_parents = 1, +- .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +@@ -1959,8 +1897,6 @@ static struct clk_branch gcc_pdm_ahb_clk = { + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pdm_ahb_clk", +- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw }, +- .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +@@ -1988,9 +1924,6 @@ static struct clk_branch gcc_sdcc1_ahb_clk = { + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc1_ahb_clk", +- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw }, +- .num_parents = 1, +- .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +@@ -2003,9 +1936,6 @@ static struct clk_branch gcc_sdcc2_ahb_clk = { + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc2_ahb_clk", +- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw }, +- .num_parents = 1, +- .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +@@ -2033,9 +1963,6 @@ static struct clk_branch gcc_sdcc3_ahb_clk = { + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc3_ahb_clk", +- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw }, +- .num_parents = 1, +- .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +@@ -2063,9 +1990,6 @@ static struct clk_branch gcc_sdcc4_ahb_clk = { + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc4_ahb_clk", +- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw }, +- .num_parents = 1, +- .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +@@ -2123,8 +2047,6 @@ static struct clk_branch gcc_tsif_ahb_clk = { + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_tsif_ahb_clk", +- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw }, +- .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +@@ -2152,8 +2074,6 @@ static struct clk_branch gcc_ufs_ahb_clk = { + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_ahb_clk", +- .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw }, +- .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +@@ -2197,8 +2117,6 @@ static struct clk_branch gcc_ufs_rx_symbol_0_clk = { + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_rx_symbol_0_clk", +- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw }, +- .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +@@ -2212,8 +2130,6 @@ static struct clk_branch gcc_ufs_rx_symbol_1_clk = { + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_rx_symbol_1_clk", +- .parent_hws = (const struct clk_hw *[]){ 
&system_noc_clk_src.clkr.hw },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2242,8 +2158,6 @@ static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_tx_symbol_0_clk",
+- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2257,8 +2171,6 @@ static struct clk_branch gcc_ufs_tx_symbol_1_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_tx_symbol_1_clk",
+- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2363,8 +2275,6 @@ static struct clk_branch gcc_usb_hs_ahb_clk = {
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_hs_ahb_clk",
+- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2487,8 +2397,6 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+- .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2502,8 +2410,6 @@ static struct clk_branch gcc_prng_ahb_clk = {
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
+- .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+@@ -2546,9 +2452,6 @@ static struct clk_regmap *gcc_msm8994_clocks[] = {
+ [GPLL0] = &gpll0.clkr,
+ [GPLL4_EARLY] = &gpll4_early.clkr,
+ [GPLL4] = &gpll4.clkr,
+- [CONFIG_NOC_CLK_SRC] = &config_noc_clk_src.clkr,
+- [PERIPH_NOC_CLK_SRC] = &periph_noc_clk_src.clkr,
+- [SYSTEM_NOC_CLK_SRC] = &system_noc_clk_src.clkr,
+ [UFS_AXI_CLK_SRC] = &ufs_axi_clk_src.clkr,
+ [USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
+ [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+@@ -2695,6 +2598,15 @@ static struct clk_regmap *gcc_msm8994_clocks[] = {
+ [USB_SS_PHY_LDO] = &usb_ss_phy_ldo.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
++
++ /*
++ * The following clocks should NOT be managed by this driver, but they once were
++ * mistakenly added. Now they are only here to indicate that they are not defined
++ * on purpose, even though the names will stay in the header file (for ABI sanity). 
++ */ ++ [CONFIG_NOC_CLK_SRC] = NULL, ++ [PERIPH_NOC_CLK_SRC] = NULL, ++ [SYSTEM_NOC_CLK_SRC] = NULL, + }; + + static struct gdsc *gcc_msm8994_gdscs[] = { +diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c +index ce63cbd14d69a..24155c038f6d0 100644 +--- a/drivers/gpio/gpio-rockchip.c ++++ b/drivers/gpio/gpio-rockchip.c +@@ -410,10 +410,8 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type) + level = rockchip_gpio_readl(bank, bank->gpio_regs->int_type); + polarity = rockchip_gpio_readl(bank, bank->gpio_regs->int_polarity); + +- switch (type) { +- case IRQ_TYPE_EDGE_BOTH: ++ if (type == IRQ_TYPE_EDGE_BOTH) { + if (bank->gpio_type == GPIO_TYPE_V2) { +- bank->toggle_edge_mode &= ~mask; + rockchip_gpio_writel_bit(bank, d->hwirq, 1, + bank->gpio_regs->int_bothedge); + goto out; +@@ -431,30 +429,34 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type) + else + polarity |= mask; + } +- break; +- case IRQ_TYPE_EDGE_RISING: +- bank->toggle_edge_mode &= ~mask; +- level |= mask; +- polarity |= mask; +- break; +- case IRQ_TYPE_EDGE_FALLING: +- bank->toggle_edge_mode &= ~mask; +- level |= mask; +- polarity &= ~mask; +- break; +- case IRQ_TYPE_LEVEL_HIGH: +- bank->toggle_edge_mode &= ~mask; +- level &= ~mask; +- polarity |= mask; +- break; +- case IRQ_TYPE_LEVEL_LOW: +- bank->toggle_edge_mode &= ~mask; +- level &= ~mask; +- polarity &= ~mask; +- break; +- default: +- ret = -EINVAL; +- goto out; ++ } else { ++ if (bank->gpio_type == GPIO_TYPE_V2) { ++ rockchip_gpio_writel_bit(bank, d->hwirq, 0, ++ bank->gpio_regs->int_bothedge); ++ } else { ++ bank->toggle_edge_mode &= ~mask; ++ } ++ switch (type) { ++ case IRQ_TYPE_EDGE_RISING: ++ level |= mask; ++ polarity |= mask; ++ break; ++ case IRQ_TYPE_EDGE_FALLING: ++ level |= mask; ++ polarity &= ~mask; ++ break; ++ case IRQ_TYPE_LEVEL_HIGH: ++ level &= ~mask; ++ polarity |= mask; ++ break; ++ case IRQ_TYPE_LEVEL_LOW: ++ level &= ~mask; ++ polarity &= ~mask; ++ break; ++ default: ++ ret = -EINVAL; ++ goto out; ++ } + } + + rockchip_gpio_writel(bank, level, bank->gpio_regs->int_type); +diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c +index c026e7141e4ea..f62f267dfd7d2 100644 +--- a/drivers/gpio/gpio-tegra186.c ++++ b/drivers/gpio/gpio-tegra186.c +@@ -341,9 +341,12 @@ static int tegra186_gpio_of_xlate(struct gpio_chip *chip, + return offset + pin; + } + ++#define to_tegra_gpio(x) container_of((x), struct tegra_gpio, gpio) ++ + static void tegra186_irq_ack(struct irq_data *data) + { +- struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data); ++ struct gpio_chip *gc = irq_data_get_irq_chip_data(data); ++ struct tegra_gpio *gpio = to_tegra_gpio(gc); + void __iomem *base; + + base = tegra186_gpio_get_base(gpio, data->hwirq); +@@ -355,7 +358,8 @@ static void tegra186_irq_ack(struct irq_data *data) + + static void tegra186_irq_mask(struct irq_data *data) + { +- struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data); ++ struct gpio_chip *gc = irq_data_get_irq_chip_data(data); ++ struct tegra_gpio *gpio = to_tegra_gpio(gc); + void __iomem *base; + u32 value; + +@@ -370,7 +374,8 @@ static void tegra186_irq_mask(struct irq_data *data) + + static void tegra186_irq_unmask(struct irq_data *data) + { +- struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data); ++ struct gpio_chip *gc = irq_data_get_irq_chip_data(data); ++ struct tegra_gpio *gpio = to_tegra_gpio(gc); + void __iomem *base; + u32 value; + +@@ -385,7 +390,8 @@ static void tegra186_irq_unmask(struct irq_data *data) + 
+ static int tegra186_irq_set_type(struct irq_data *data, unsigned int type) + { +- struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data); ++ struct gpio_chip *gc = irq_data_get_irq_chip_data(data); ++ struct tegra_gpio *gpio = to_tegra_gpio(gc); + void __iomem *base; + u32 value; + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +index ab3851c26f71c..8c7637233c816 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +@@ -2014,6 +2014,9 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, + return -ENODEV; + } + ++ if (amdgpu_aspm == -1 && !pcie_aspm_enabled(pdev)) ++ amdgpu_aspm = 0; ++ + if (amdgpu_virtual_display || + amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK)) + supports_atomic = true; +diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c +index de9b55383e9f8..d01ddce2dec1d 100644 +--- a/drivers/gpu/drm/amd/amdgpu/soc15.c ++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c +@@ -619,8 +619,8 @@ soc15_asic_reset_method(struct amdgpu_device *adev) + static int soc15_asic_reset(struct amdgpu_device *adev) + { + /* original raven doesn't have full asic reset */ +- if ((adev->apu_flags & AMD_APU_IS_RAVEN) && +- !(adev->apu_flags & AMD_APU_IS_RAVEN2)) ++ if ((adev->apu_flags & AMD_APU_IS_RAVEN) || ++ (adev->apu_flags & AMD_APU_IS_RAVEN2)) + return 0; + + switch (soc15_asic_reset_method(adev)) { +@@ -1114,8 +1114,11 @@ static int soc15_common_early_init(void *handle) + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_VCN_MGCG; + ++ /* ++ * MMHUB PG needs to be disabled for Picasso for ++ * stability reasons. ++ */ + adev->pg_flags = AMD_PG_SUPPORT_SDMA | +- AMD_PG_SUPPORT_MMHUB | + AMD_PG_SUPPORT_VCN; + } else { + adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +index 0117b00b4ed83..7a5bb5a3456a6 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +@@ -4232,6 +4232,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) + } + #endif + ++ /* Disable vblank IRQs aggressively for power-saving. */ ++ adev_to_drm(adev)->vblank_disable_immediate = true; ++ + /* loops over all connectors on the board */ + for (i = 0; i < link_cnt; i++) { + struct dc_link *link = NULL; +@@ -4277,19 +4280,17 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) + update_connector_ext_caps(aconnector); + if (psr_feature_enabled) + amdgpu_dm_set_psr_caps(link); ++ ++ /* TODO: Fix vblank control helpers to delay PSR entry to allow this when ++ * PSR is also supported. ++ */ ++ if (link->psr_settings.psr_feature_enabled) ++ adev_to_drm(adev)->vblank_disable_immediate = false; + } + + + } + +- /* +- * Disable vblank IRQs aggressively for power-saving. +- * +- * TODO: Fix vblank control helpers to delay PSR entry to allow this when PSR +- * is also supported. +- */ +- adev_to_drm(adev)->vblank_disable_immediate = !psr_feature_enabled; +- + /* Software is initialized. Now we can register interrupt handlers. 
*/ + switch (adev->asic_type) { + #if defined(CONFIG_DRM_AMD_DC_SI) +diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +index 1861a147a7fa1..5c5cbeb59c4d9 100644 +--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c ++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +@@ -437,8 +437,10 @@ static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base) + clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1; + + /* Refresh bounding box */ ++ DC_FP_START(); + clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box( + clk_mgr->base.ctx->dc, clk_mgr_base->bw_params); ++ DC_FP_END(); + } + + static bool dcn3_is_smu_present(struct clk_mgr *clk_mgr_base) +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c +index e890e063cde31..1e7fe6bea300f 100644 +--- a/drivers/gpu/drm/amd/display/dc/core/dc.c ++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c +@@ -999,10 +999,13 @@ static bool dc_construct(struct dc *dc, + goto fail; + #ifdef CONFIG_DRM_AMD_DC_DCN + dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present; +-#endif + +- if (dc->res_pool->funcs->update_bw_bounding_box) ++ if (dc->res_pool->funcs->update_bw_bounding_box) { ++ DC_FP_START(); + dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params); ++ DC_FP_END(); ++ } ++#endif + + /* Creation of current_state must occur after dc->dml + * is initialized in dc_create_resource_pool because +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +index e2d9a46d0e1ad..6b066ceab4128 100644 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +@@ -1876,10 +1876,6 @@ enum dc_status dc_remove_stream_from_ctx( + dc->res_pool, + del_pipe->stream_res.stream_enc, + false); +- /* Release link encoder from stream in new dc_state. 
*/ +- if (dc->res_pool->funcs->link_enc_unassign) +- dc->res_pool->funcs->link_enc_unassign(new_ctx, del_pipe->stream); +- + #if defined(CONFIG_DRM_AMD_DC_DCN) + if (is_dp_128b_132b_signal(del_pipe)) { + update_hpo_dp_stream_engine_usage( +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +index 446d37320b948..b55118388d2d7 100644 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +@@ -418,6 +418,36 @@ static int sienna_cichlid_store_powerplay_table(struct smu_context *smu) + return 0; + } + ++static int sienna_cichlid_patch_pptable_quirk(struct smu_context *smu) ++{ ++ struct amdgpu_device *adev = smu->adev; ++ uint32_t *board_reserved; ++ uint16_t *freq_table_gfx; ++ uint32_t i; ++ ++ /* Fix some OEM SKU specific stability issues */ ++ GET_PPTABLE_MEMBER(BoardReserved, &board_reserved); ++ if ((adev->pdev->device == 0x73DF) && ++ (adev->pdev->revision == 0XC3) && ++ (adev->pdev->subsystem_device == 0x16C2) && ++ (adev->pdev->subsystem_vendor == 0x1043)) ++ board_reserved[0] = 1387; ++ ++ GET_PPTABLE_MEMBER(FreqTableGfx, &freq_table_gfx); ++ if ((adev->pdev->device == 0x73DF) && ++ (adev->pdev->revision == 0XC3) && ++ ((adev->pdev->subsystem_device == 0x16C2) || ++ (adev->pdev->subsystem_device == 0x133C)) && ++ (adev->pdev->subsystem_vendor == 0x1043)) { ++ for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++) { ++ if (freq_table_gfx[i] > 2500) ++ freq_table_gfx[i] = 2500; ++ } ++ } ++ ++ return 0; ++} ++ + static int sienna_cichlid_setup_pptable(struct smu_context *smu) + { + int ret = 0; +@@ -438,7 +468,7 @@ static int sienna_cichlid_setup_pptable(struct smu_context *smu) + if (ret) + return ret; + +- return ret; ++ return sienna_cichlid_patch_pptable_quirk(smu); + } + + static int sienna_cichlid_tables_init(struct smu_context *smu) +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c +index 12893e7be89bb..f5f5de362ff2c 100644 +--- a/drivers/gpu/drm/drm_edid.c ++++ b/drivers/gpu/drm/drm_edid.c +@@ -5345,6 +5345,7 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi + if (!(edid->input & DRM_EDID_INPUT_DIGITAL)) + return quirks; + ++ info->color_formats |= DRM_COLOR_FORMAT_RGB444; + drm_parse_cea_ext(connector, edid); + + /* +@@ -5393,7 +5394,6 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi + DRM_DEBUG("%s: Assigning EDID-1.4 digital sink color depth as %d bpc.\n", + connector->name, info->bpc); + +- info->color_formats |= DRM_COLOR_FORMAT_RGB444; + if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444) + info->color_formats |= DRM_COLOR_FORMAT_YCRCB444; + if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422) +diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c +index 8d9d888e93161..5a2f96d39ac78 100644 +--- a/drivers/gpu/drm/i915/display/intel_bw.c ++++ b/drivers/gpu/drm/i915/display/intel_bw.c +@@ -681,6 +681,7 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) + unsigned int max_bw_point = 0, max_bw = 0; + unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points; + unsigned int num_psf_gv_points = dev_priv->max_bw[0].num_psf_gv_points; ++ bool changed = false; + u32 mask = 0; + + /* FIXME earlier gens need some checks too */ +@@ -724,6 +725,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) + new_bw_state->data_rate[crtc->pipe] = new_data_rate; + new_bw_state->num_active_planes[crtc->pipe] 
= new_active_planes; + ++ changed = true; ++ + drm_dbg_kms(&dev_priv->drm, + "pipe %c data rate %u num active planes %u\n", + pipe_name(crtc->pipe), +@@ -731,7 +734,19 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) + new_bw_state->num_active_planes[crtc->pipe]); + } + +- if (!new_bw_state) ++ old_bw_state = intel_atomic_get_old_bw_state(state); ++ new_bw_state = intel_atomic_get_new_bw_state(state); ++ ++ if (new_bw_state && ++ intel_can_enable_sagv(dev_priv, old_bw_state) != ++ intel_can_enable_sagv(dev_priv, new_bw_state)) ++ changed = true; ++ ++ /* ++ * If none of our inputs (data rates, number of active ++ * planes, SAGV yes/no) changed then nothing to do here. ++ */ ++ if (!changed) + return 0; + + ret = intel_atomic_lock_global_state(&new_bw_state->base); +@@ -814,7 +829,6 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) + */ + new_bw_state->qgv_points_mask = ~allowed_points & mask; + +- old_bw_state = intel_atomic_get_old_bw_state(state); + /* + * If the actual mask had changed we need to make sure that + * the commits are serialized(in case this is a nomodeset, nonblocking) +diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h +index 46c6eecbd9175..0ceaed1c96562 100644 +--- a/drivers/gpu/drm/i915/display/intel_bw.h ++++ b/drivers/gpu/drm/i915/display/intel_bw.h +@@ -30,19 +30,19 @@ struct intel_bw_state { + */ + u8 pipe_sagv_reject; + ++ /* bitmask of active pipes */ ++ u8 active_pipes; ++ + /* + * Current QGV points mask, which restricts + * some particular SAGV states, not to confuse + * with pipe_sagv_mask. + */ +- u8 qgv_points_mask; ++ u16 qgv_points_mask; + + unsigned int data_rate[I915_MAX_PIPES]; + u8 num_active_planes[I915_MAX_PIPES]; + +- /* bitmask of active pipes */ +- u8 active_pipes; +- + int min_cdclk; + }; + +diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c +index 5e20f340730fb..601929bab874c 100644 +--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c ++++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c +@@ -34,7 +34,7 @@ void intel_snps_phy_wait_for_calibration(struct drm_i915_private *dev_priv) + if (intel_de_wait_for_clear(dev_priv, ICL_PHY_MISC(phy), + DG2_PHY_DP_TX_ACK_MASK, 25)) + DRM_ERROR("SNPS PHY %c failed to calibrate after 25ms.\n", +- phy); ++ phy_name(phy)); + } + } + +diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c +index dbd7d0d83a141..7784c30fe8937 100644 +--- a/drivers/gpu/drm/i915/display/intel_tc.c ++++ b/drivers/gpu/drm/i915/display/intel_tc.c +@@ -691,6 +691,8 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port) + { + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_encoder *encoder = &dig_port->base; ++ intel_wakeref_t tc_cold_wref; ++ enum intel_display_power_domain domain; + int active_links = 0; + + mutex_lock(&dig_port->tc_lock); +@@ -702,12 +704,11 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port) + + drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_DISCONNECTED); + drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref); +- if (active_links) { +- enum intel_display_power_domain domain; +- intel_wakeref_t tc_cold_wref = tc_cold_block(dig_port, &domain); + +- dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port); ++ tc_cold_wref = tc_cold_block(dig_port, &domain); + ++ dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port); ++ if (active_links) { + if (!icl_tc_phy_is_connected(dig_port)) + 
drm_dbg_kms(&i915->drm, + "Port %s: PHY disconnected with %d active link(s)\n", +@@ -716,10 +717,23 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port) + + dig_port->tc_lock_wakeref = tc_cold_block(dig_port, + &dig_port->tc_lock_power_domain); +- +- tc_cold_unblock(dig_port, domain, tc_cold_wref); ++ } else { ++ /* ++ * TBT-alt is the default mode in any case the PHY ownership is not ++ * held (regardless of the sink's connected live state), so ++ * we'll just switch to disconnected mode from it here without ++ * a note. ++ */ ++ if (dig_port->tc_mode != TC_PORT_TBT_ALT) ++ drm_dbg_kms(&i915->drm, ++ "Port %s: PHY left in %s mode on disabled port, disconnecting it\n", ++ dig_port->tc_port_name, ++ tc_port_mode_name(dig_port->tc_mode)); ++ icl_tc_phy_disconnect(dig_port); + } + ++ tc_cold_unblock(dig_port, domain, tc_cold_wref); ++ + drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n", + dig_port->tc_port_name, + tc_port_mode_name(dig_port->tc_mode)); +diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c +index 75c1522fdae8c..7cbffd9a7be88 100644 +--- a/drivers/gpu/drm/i915/intel_pm.c ++++ b/drivers/gpu/drm/i915/intel_pm.c +@@ -4019,6 +4019,17 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state) + return ret; + } + ++ if (intel_can_enable_sagv(dev_priv, new_bw_state) != ++ intel_can_enable_sagv(dev_priv, old_bw_state)) { ++ ret = intel_atomic_serialize_global_state(&new_bw_state->base); ++ if (ret) ++ return ret; ++ } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) { ++ ret = intel_atomic_lock_global_state(&new_bw_state->base); ++ if (ret) ++ return ret; ++ } ++ + for_each_new_intel_crtc_in_state(state, crtc, + new_crtc_state, i) { + struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal; +@@ -4034,17 +4045,6 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state) + intel_can_enable_sagv(dev_priv, new_bw_state); + } + +- if (intel_can_enable_sagv(dev_priv, new_bw_state) != +- intel_can_enable_sagv(dev_priv, old_bw_state)) { +- ret = intel_atomic_serialize_global_state(&new_bw_state->base); +- if (ret) +- return ret; +- } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) { +- ret = intel_atomic_lock_global_state(&new_bw_state->base); +- if (ret) +- return ret; +- } +- + return 0; + } + +diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c +index e3ed52d96f423..3e61184e194c9 100644 +--- a/drivers/gpu/drm/vc4/vc4_crtc.c ++++ b/drivers/gpu/drm/vc4/vc4_crtc.c +@@ -538,9 +538,11 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc) + if (ret) + return ret; + +- ret = pm_runtime_put(&vc4_hdmi->pdev->dev); +- if (ret) +- return ret; ++ /* ++ * post_crtc_powerdown will have called pm_runtime_put, so we ++ * don't need it here otherwise we'll get the reference counting ++ * wrong. 
++ */ + + return 0; + } +diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c +index d198a10848c6b..a89a408182e60 100644 +--- a/drivers/gpu/host1x/syncpt.c ++++ b/drivers/gpu/host1x/syncpt.c +@@ -225,27 +225,12 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout, + void *ref; + struct host1x_waitlist *waiter; + int err = 0, check_count = 0; +- u32 val; + + if (value) +- *value = 0; +- +- /* first check cache */ +- if (host1x_syncpt_is_expired(sp, thresh)) { +- if (value) +- *value = host1x_syncpt_load(sp); ++ *value = host1x_syncpt_load(sp); + ++ if (host1x_syncpt_is_expired(sp, thresh)) + return 0; +- } +- +- /* try to read from register */ +- val = host1x_hw_syncpt_load(sp->host, sp); +- if (host1x_syncpt_is_expired(sp, thresh)) { +- if (value) +- *value = val; +- +- goto done; +- } + + if (!timeout) { + err = -EAGAIN; +diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c +index 3501a3ead4ba6..3ae961986fc31 100644 +--- a/drivers/hwmon/hwmon.c ++++ b/drivers/hwmon/hwmon.c +@@ -214,12 +214,14 @@ static int hwmon_thermal_add_sensor(struct device *dev, int index) + + tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata, + &hwmon_thermal_ops); +- /* +- * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV, +- * so ignore that error but forward any other error. +- */ +- if (IS_ERR(tzd) && (PTR_ERR(tzd) != -ENODEV)) +- return PTR_ERR(tzd); ++ if (IS_ERR(tzd)) { ++ if (PTR_ERR(tzd) != -ENODEV) ++ return PTR_ERR(tzd); ++ dev_info(dev, "temp%d_input not attached to any thermal zone\n", ++ index + 1); ++ devm_kfree(dev, tdata); ++ return 0; ++ } + + err = devm_add_action(dev, hwmon_thermal_remove_sensor, &tdata->node); + if (err) +diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c +index b0678c351e829..c3a2b4c0b3b26 100644 +--- a/drivers/iio/accel/bmc150-accel-core.c ++++ b/drivers/iio/accel/bmc150-accel-core.c +@@ -1783,11 +1783,14 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq, + ret = iio_device_register(indio_dev); + if (ret < 0) { + dev_err(dev, "Unable to register iio device\n"); +- goto err_trigger_unregister; ++ goto err_pm_cleanup; + } + + return 0; + ++err_pm_cleanup: ++ pm_runtime_dont_use_autosuspend(dev); ++ pm_runtime_disable(dev); + err_trigger_unregister: + bmc150_accel_unregister_triggers(data, BMC150_ACCEL_TRIGGERS - 1); + err_buffer_cleanup: +diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c +index 32989d91b9829..f7fd9e046588b 100644 +--- a/drivers/iio/accel/fxls8962af-core.c ++++ b/drivers/iio/accel/fxls8962af-core.c +@@ -173,12 +173,20 @@ struct fxls8962af_data { + u16 upper_thres; + }; + +-const struct regmap_config fxls8962af_regmap_conf = { ++const struct regmap_config fxls8962af_i2c_regmap_conf = { + .reg_bits = 8, + .val_bits = 8, + .max_register = FXLS8962AF_MAX_REG, + }; +-EXPORT_SYMBOL_GPL(fxls8962af_regmap_conf); ++EXPORT_SYMBOL_GPL(fxls8962af_i2c_regmap_conf); ++ ++const struct regmap_config fxls8962af_spi_regmap_conf = { ++ .reg_bits = 8, ++ .pad_bits = 8, ++ .val_bits = 8, ++ .max_register = FXLS8962AF_MAX_REG, ++}; ++EXPORT_SYMBOL_GPL(fxls8962af_spi_regmap_conf); + + enum { + fxls8962af_idx_x, +diff --git a/drivers/iio/accel/fxls8962af-i2c.c b/drivers/iio/accel/fxls8962af-i2c.c +index cfb004b204559..6bde9891effbf 100644 +--- a/drivers/iio/accel/fxls8962af-i2c.c ++++ b/drivers/iio/accel/fxls8962af-i2c.c +@@ -18,7 +18,7 @@ static int fxls8962af_probe(struct i2c_client *client) + { + struct regmap 
*regmap; + +- regmap = devm_regmap_init_i2c(client, &fxls8962af_regmap_conf); ++ regmap = devm_regmap_init_i2c(client, &fxls8962af_i2c_regmap_conf); + if (IS_ERR(regmap)) { + dev_err(&client->dev, "Failed to initialize i2c regmap\n"); + return PTR_ERR(regmap); +diff --git a/drivers/iio/accel/fxls8962af-spi.c b/drivers/iio/accel/fxls8962af-spi.c +index 57108d3d480b6..6f4dff3238d3c 100644 +--- a/drivers/iio/accel/fxls8962af-spi.c ++++ b/drivers/iio/accel/fxls8962af-spi.c +@@ -18,7 +18,7 @@ static int fxls8962af_probe(struct spi_device *spi) + { + struct regmap *regmap; + +- regmap = devm_regmap_init_spi(spi, &fxls8962af_regmap_conf); ++ regmap = devm_regmap_init_spi(spi, &fxls8962af_spi_regmap_conf); + if (IS_ERR(regmap)) { + dev_err(&spi->dev, "Failed to initialize spi regmap\n"); + return PTR_ERR(regmap); +diff --git a/drivers/iio/accel/fxls8962af.h b/drivers/iio/accel/fxls8962af.h +index b67572c3ef069..9cbe98c3ba9a2 100644 +--- a/drivers/iio/accel/fxls8962af.h ++++ b/drivers/iio/accel/fxls8962af.h +@@ -17,6 +17,7 @@ int fxls8962af_core_probe(struct device *dev, struct regmap *regmap, int irq); + int fxls8962af_core_remove(struct device *dev); + + extern const struct dev_pm_ops fxls8962af_pm_ops; +-extern const struct regmap_config fxls8962af_regmap_conf; ++extern const struct regmap_config fxls8962af_i2c_regmap_conf; ++extern const struct regmap_config fxls8962af_spi_regmap_conf; + + #endif /* _FXLS8962AF_H_ */ +diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c +index 24c9387c29687..ba6c8ca488b1a 100644 +--- a/drivers/iio/accel/kxcjk-1013.c ++++ b/drivers/iio/accel/kxcjk-1013.c +@@ -1589,11 +1589,14 @@ static int kxcjk1013_probe(struct i2c_client *client, + ret = iio_device_register(indio_dev); + if (ret < 0) { + dev_err(&client->dev, "unable to register iio device\n"); +- goto err_buffer_cleanup; ++ goto err_pm_cleanup; + } + + return 0; + ++err_pm_cleanup: ++ pm_runtime_dont_use_autosuspend(&client->dev); ++ pm_runtime_disable(&client->dev); + err_buffer_cleanup: + iio_triggered_buffer_cleanup(indio_dev); + err_trigger_unregister: +diff --git a/drivers/iio/accel/mma9551.c b/drivers/iio/accel/mma9551.c +index 4c359fb054801..c53a3398b14c4 100644 +--- a/drivers/iio/accel/mma9551.c ++++ b/drivers/iio/accel/mma9551.c +@@ -495,11 +495,14 @@ static int mma9551_probe(struct i2c_client *client, + ret = iio_device_register(indio_dev); + if (ret < 0) { + dev_err(&client->dev, "unable to register iio device\n"); +- goto out_poweroff; ++ goto err_pm_cleanup; + } + + return 0; + ++err_pm_cleanup: ++ pm_runtime_dont_use_autosuspend(&client->dev); ++ pm_runtime_disable(&client->dev); + out_poweroff: + mma9551_set_device_state(client, false); + +diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c +index ba3ecb3b57dcd..1599b75724d4f 100644 +--- a/drivers/iio/accel/mma9553.c ++++ b/drivers/iio/accel/mma9553.c +@@ -1134,12 +1134,15 @@ static int mma9553_probe(struct i2c_client *client, + ret = iio_device_register(indio_dev); + if (ret < 0) { + dev_err(&client->dev, "unable to register iio device\n"); +- goto out_poweroff; ++ goto err_pm_cleanup; + } + + dev_dbg(&indio_dev->dev, "Registered device %s\n", name); + return 0; + ++err_pm_cleanup: ++ pm_runtime_dont_use_autosuspend(&client->dev); ++ pm_runtime_disable(&client->dev); + out_poweroff: + mma9551_set_device_state(client, false); + return ret; +diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c +index e45c600fccc0b..18c154afbd7ac 100644 +--- a/drivers/iio/adc/ad7124.c ++++ 
b/drivers/iio/adc/ad7124.c +@@ -76,7 +76,7 @@ + #define AD7124_CONFIG_REF_SEL(x) FIELD_PREP(AD7124_CONFIG_REF_SEL_MSK, x) + #define AD7124_CONFIG_PGA_MSK GENMASK(2, 0) + #define AD7124_CONFIG_PGA(x) FIELD_PREP(AD7124_CONFIG_PGA_MSK, x) +-#define AD7124_CONFIG_IN_BUFF_MSK GENMASK(7, 6) ++#define AD7124_CONFIG_IN_BUFF_MSK GENMASK(6, 5) + #define AD7124_CONFIG_IN_BUFF(x) FIELD_PREP(AD7124_CONFIG_IN_BUFF_MSK, x) + + /* AD7124_FILTER_X */ +diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c +index 42ea8bc7e7805..adc5ceaef8c93 100644 +--- a/drivers/iio/adc/men_z188_adc.c ++++ b/drivers/iio/adc/men_z188_adc.c +@@ -103,6 +103,7 @@ static int men_z188_probe(struct mcb_device *dev, + struct z188_adc *adc; + struct iio_dev *indio_dev; + struct resource *mem; ++ int ret; + + indio_dev = devm_iio_device_alloc(&dev->dev, sizeof(struct z188_adc)); + if (!indio_dev) +@@ -128,8 +129,14 @@ static int men_z188_probe(struct mcb_device *dev, + adc->mem = mem; + mcb_set_drvdata(dev, indio_dev); + +- return iio_device_register(indio_dev); ++ ret = iio_device_register(indio_dev); ++ if (ret) ++ goto err_unmap; ++ ++ return 0; + ++err_unmap: ++ iounmap(adc->base); + err: + mcb_release_mem(mem); + return -ENXIO; +diff --git a/drivers/iio/adc/ti-tsc2046.c b/drivers/iio/adc/ti-tsc2046.c +index d84ae6b008c1b..e8fc4d01f30b6 100644 +--- a/drivers/iio/adc/ti-tsc2046.c ++++ b/drivers/iio/adc/ti-tsc2046.c +@@ -388,7 +388,7 @@ static int tsc2046_adc_update_scan_mode(struct iio_dev *indio_dev, + mutex_lock(&priv->slock); + + size = 0; +- for_each_set_bit(ch_idx, active_scan_mask, indio_dev->num_channels) { ++ for_each_set_bit(ch_idx, active_scan_mask, ARRAY_SIZE(priv->l)) { + size += tsc2046_adc_group_set_layout(priv, group, ch_idx); + tsc2046_adc_group_set_cmd(priv, group, ch_idx); + group++; +@@ -548,7 +548,7 @@ static int tsc2046_adc_setup_spi_msg(struct tsc2046_adc_priv *priv) + * enabled. 
+ */ + size = 0; +- for (ch_idx = 0; ch_idx < priv->dcfg->num_channels; ch_idx++) ++ for (ch_idx = 0; ch_idx < ARRAY_SIZE(priv->l); ch_idx++) + size += tsc2046_adc_group_set_layout(priv, ch_idx, ch_idx); + + priv->tx = devm_kzalloc(&priv->spi->dev, size, GFP_KERNEL); +diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c +index 17b939a367ad0..81a6d09788bd7 100644 +--- a/drivers/iio/gyro/bmg160_core.c ++++ b/drivers/iio/gyro/bmg160_core.c +@@ -1188,11 +1188,14 @@ int bmg160_core_probe(struct device *dev, struct regmap *regmap, int irq, + ret = iio_device_register(indio_dev); + if (ret < 0) { + dev_err(dev, "unable to register iio device\n"); +- goto err_buffer_cleanup; ++ goto err_pm_cleanup; + } + + return 0; + ++err_pm_cleanup: ++ pm_runtime_dont_use_autosuspend(dev); ++ pm_runtime_disable(dev); + err_buffer_cleanup: + iio_triggered_buffer_cleanup(indio_dev); + err_trigger_unregister: +diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c +index ed129321a14da..f9b4540db1f43 100644 +--- a/drivers/iio/imu/adis16480.c ++++ b/drivers/iio/imu/adis16480.c +@@ -1403,6 +1403,7 @@ static int adis16480_probe(struct spi_device *spi) + { + const struct spi_device_id *id = spi_get_device_id(spi); + const struct adis_data *adis16480_data; ++ irq_handler_t trigger_handler = NULL; + struct iio_dev *indio_dev; + struct adis16480 *st; + int ret; +@@ -1474,8 +1475,12 @@ static int adis16480_probe(struct spi_device *spi) + st->clk_freq = st->chip_info->int_clk; + } + ++ /* Only use our trigger handler if burst mode is supported */ ++ if (adis16480_data->burst_len) ++ trigger_handler = adis16480_trigger_handler; ++ + ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev, +- adis16480_trigger_handler); ++ trigger_handler); + if (ret) + return ret; + +diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c +index 1dabfd615dabf..f89724481df93 100644 +--- a/drivers/iio/imu/kmx61.c ++++ b/drivers/iio/imu/kmx61.c +@@ -1385,7 +1385,7 @@ static int kmx61_probe(struct i2c_client *client, + ret = iio_device_register(data->acc_indio_dev); + if (ret < 0) { + dev_err(&client->dev, "Failed to register acc iio device\n"); +- goto err_buffer_cleanup_mag; ++ goto err_pm_cleanup; + } + + ret = iio_device_register(data->mag_indio_dev); +@@ -1398,6 +1398,9 @@ static int kmx61_probe(struct i2c_client *client, + + err_iio_unregister_acc: + iio_device_unregister(data->acc_indio_dev); ++err_pm_cleanup: ++ pm_runtime_dont_use_autosuspend(&client->dev); ++ pm_runtime_disable(&client->dev); + err_buffer_cleanup_mag: + if (client->irq > 0) + iio_triggered_buffer_cleanup(data->mag_indio_dev); +diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c +index f2cbbc756459b..32d9a5e30685b 100644 +--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c ++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c +@@ -1374,8 +1374,12 @@ static int st_lsm6dsx_read_oneshot(struct st_lsm6dsx_sensor *sensor, + if (err < 0) + return err; + ++ /* ++ * we need to wait for sensor settling time before ++ * reading data in order to avoid corrupted samples ++ */ + delay = 1000000000 / sensor->odr; +- usleep_range(delay, 2 * delay); ++ usleep_range(3 * delay, 4 * delay); + + err = st_lsm6dsx_read_locked(hw, addr, &data, sizeof(data)); + if (err < 0) +diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c +index f96f531753495..3d4d21f979fab 100644 +--- a/drivers/iio/magnetometer/bmc150_magn.c ++++ 
b/drivers/iio/magnetometer/bmc150_magn.c +@@ -962,13 +962,14 @@ int bmc150_magn_probe(struct device *dev, struct regmap *regmap, + ret = iio_device_register(indio_dev); + if (ret < 0) { + dev_err(dev, "unable to register iio device\n"); +- goto err_disable_runtime_pm; ++ goto err_pm_cleanup; + } + + dev_dbg(dev, "Registered device %s\n", name); + return 0; + +-err_disable_runtime_pm: ++err_pm_cleanup: ++ pm_runtime_dont_use_autosuspend(dev); + pm_runtime_disable(dev); + err_buffer_cleanup: + iio_triggered_buffer_cleanup(indio_dev); +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c +index a8da4291e7e3b..41ec05c4b0d0e 100644 +--- a/drivers/infiniband/core/cma.c ++++ b/drivers/infiniband/core/cma.c +@@ -3370,22 +3370,30 @@ err: + static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, + const struct sockaddr *dst_addr) + { +- if (!src_addr || !src_addr->sa_family) { +- src_addr = (struct sockaddr *) &id->route.addr.src_addr; +- src_addr->sa_family = dst_addr->sa_family; +- if (IS_ENABLED(CONFIG_IPV6) && +- dst_addr->sa_family == AF_INET6) { +- struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr; +- struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr; +- src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; +- if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) +- id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id; +- } else if (dst_addr->sa_family == AF_IB) { +- ((struct sockaddr_ib *) src_addr)->sib_pkey = +- ((struct sockaddr_ib *) dst_addr)->sib_pkey; +- } +- } +- return rdma_bind_addr(id, src_addr); ++ struct sockaddr_storage zero_sock = {}; ++ ++ if (src_addr && src_addr->sa_family) ++ return rdma_bind_addr(id, src_addr); ++ ++ /* ++ * When the src_addr is not specified, automatically supply an any addr ++ */ ++ zero_sock.ss_family = dst_addr->sa_family; ++ if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) { ++ struct sockaddr_in6 *src_addr6 = ++ (struct sockaddr_in6 *)&zero_sock; ++ struct sockaddr_in6 *dst_addr6 = ++ (struct sockaddr_in6 *)dst_addr; ++ ++ src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; ++ if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) ++ id->route.addr.dev_addr.bound_dev_if = ++ dst_addr6->sin6_scope_id; ++ } else if (dst_addr->sa_family == AF_IB) { ++ ((struct sockaddr_ib *)&zero_sock)->sib_pkey = ++ ((struct sockaddr_ib *)dst_addr)->sib_pkey; ++ } ++ return rdma_bind_addr(id, (struct sockaddr *)&zero_sock); + } + + /* +diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c +index 0a3b28142c05b..41c272980f91c 100644 +--- a/drivers/infiniband/hw/qib/qib_sysfs.c ++++ b/drivers/infiniband/hw/qib/qib_sysfs.c +@@ -541,7 +541,7 @@ static struct attribute *port_diagc_attributes[] = { + }; + + static const struct attribute_group port_diagc_group = { +- .name = "linkcontrol", ++ .name = "diag_counters", + .attrs = port_diagc_attributes, + }; + +diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c +index e39709dee179d..be96701cf281e 100644 +--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c ++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c +@@ -2664,6 +2664,8 @@ static void rtrs_clt_dev_release(struct device *dev) + { + struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev); + ++ mutex_destroy(&clt->paths_ev_mutex); ++ mutex_destroy(&clt->paths_mutex); + kfree(clt); + } + +@@ -2693,6 +2695,8 @@ static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num, + 
return ERR_PTR(-ENOMEM); + } + ++ clt->dev.class = rtrs_clt_dev_class; ++ clt->dev.release = rtrs_clt_dev_release; + uuid_gen(&clt->paths_uuid); + INIT_LIST_HEAD_RCU(&clt->paths_list); + clt->paths_num = paths_num; +@@ -2709,53 +2713,51 @@ static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num, + init_waitqueue_head(&clt->permits_wait); + mutex_init(&clt->paths_ev_mutex); + mutex_init(&clt->paths_mutex); ++ device_initialize(&clt->dev); + +- clt->dev.class = rtrs_clt_dev_class; +- clt->dev.release = rtrs_clt_dev_release; + err = dev_set_name(&clt->dev, "%s", sessname); + if (err) +- goto err; ++ goto err_put; ++ + /* + * Suppress user space notification until + * sysfs files are created + */ + dev_set_uevent_suppress(&clt->dev, true); +- err = device_register(&clt->dev); +- if (err) { +- put_device(&clt->dev); +- goto err; +- } ++ err = device_add(&clt->dev); ++ if (err) ++ goto err_put; + + clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj); + if (!clt->kobj_paths) { + err = -ENOMEM; +- goto err_dev; ++ goto err_del; + } + err = rtrs_clt_create_sysfs_root_files(clt); + if (err) { + kobject_del(clt->kobj_paths); + kobject_put(clt->kobj_paths); +- goto err_dev; ++ goto err_del; + } + dev_set_uevent_suppress(&clt->dev, false); + kobject_uevent(&clt->dev.kobj, KOBJ_ADD); + + return clt; +-err_dev: +- device_unregister(&clt->dev); +-err: ++err_del: ++ device_del(&clt->dev); ++err_put: + free_percpu(clt->pcpu_path); +- kfree(clt); ++ put_device(&clt->dev); + return ERR_PTR(err); + } + + static void free_clt(struct rtrs_clt *clt) + { +- free_permits(clt); + free_percpu(clt->pcpu_path); +- mutex_destroy(&clt->paths_ev_mutex); +- mutex_destroy(&clt->paths_mutex); +- /* release callback will free clt in last put */ ++ ++ /* ++ * release callback will free clt and destroy mutexes in last put ++ */ + device_unregister(&clt->dev); + } + +@@ -2872,6 +2874,7 @@ void rtrs_clt_close(struct rtrs_clt *clt) + rtrs_clt_destroy_sess_files(sess, NULL); + kobject_put(&sess->kobj); + } ++ free_permits(clt); + free_clt(clt); + } + EXPORT_SYMBOL(rtrs_clt_close); +diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c +index e174e853f8a40..285b766e4e704 100644 +--- a/drivers/infiniband/ulp/srp/ib_srp.c ++++ b/drivers/infiniband/ulp/srp/ib_srp.c +@@ -4047,9 +4047,11 @@ static void srp_remove_one(struct ib_device *device, void *client_data) + spin_unlock(&host->target_lock); + + /* +- * Wait for tl_err and target port removal tasks. ++ * srp_queue_remove_work() queues a call to ++ * srp_remove_target(). The latter function cancels ++ * target->tl_err_work so waiting for the remove works to ++ * finish is sufficient. 
+ */ +- flush_workqueue(system_long_wq); + flush_workqueue(srp_remove_wq); + + kfree(host); +diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c +index fc0bed14bfb10..02ca6e5fa0dc7 100644 +--- a/drivers/mtd/mtdcore.c ++++ b/drivers/mtd/mtdcore.c +@@ -546,6 +546,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd) + config.stride = 1; + config.read_only = true; + config.root_only = true; ++ config.ignore_wp = true; + config.no_of_node = !of_device_is_compatible(node, "nvmem-cells"); + config.priv = mtd; + +@@ -830,6 +831,7 @@ static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd, + config.owner = THIS_MODULE; + config.type = NVMEM_TYPE_OTP; + config.root_only = true; ++ config.ignore_wp = true; + config.reg_read = reg_read; + config.size = size; + config.of_node = np; +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +index 125dafe1db7ee..4ce596daeaae3 100644 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +@@ -100,6 +100,9 @@ MODULE_LICENSE("GPL"); + MODULE_FIRMWARE(FW_FILE_NAME_E1); + MODULE_FIRMWARE(FW_FILE_NAME_E1H); + MODULE_FIRMWARE(FW_FILE_NAME_E2); ++MODULE_FIRMWARE(FW_FILE_NAME_E1_V15); ++MODULE_FIRMWARE(FW_FILE_NAME_E1H_V15); ++MODULE_FIRMWARE(FW_FILE_NAME_E2_V15); + + int bnx2x_num_queues; + module_param_named(num_queues, bnx2x_num_queues, int, 0444); +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +index 7eaf74e5b2929..fab8dd73fa84c 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +@@ -4719,8 +4719,10 @@ static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) + return rc; + + req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); +- req->num_mc_entries = cpu_to_le32(vnic->mc_list_count); +- req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); ++ if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) { ++ req->num_mc_entries = cpu_to_le32(vnic->mc_list_count); ++ req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); ++ } + req->mask = cpu_to_le32(vnic->rx_mask); + return hwrm_req_send_silent(bp, req); + } +@@ -7774,6 +7776,19 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp) + return 0; + } + ++static void bnxt_remap_fw_health_regs(struct bnxt *bp) ++{ ++ if (!bp->fw_health) ++ return; ++ ++ if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) { ++ bp->fw_health->status_reliable = true; ++ bp->fw_health->resets_reliable = true; ++ } else { ++ bnxt_try_map_fw_health_reg(bp); ++ } ++} ++ + static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) + { + struct bnxt_fw_health *fw_health = bp->fw_health; +@@ -8623,6 +8638,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) + vnic->uc_filter_count = 1; + + vnic->rx_mask = 0; ++ if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state)) ++ goto skip_rx_mask; ++ + if (bp->dev->flags & IFF_BROADCAST) + vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; + +@@ -8632,7 +8650,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) + if (bp->dev->flags & IFF_ALLMULTI) { + vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; + vnic->mc_list_count = 0; +- } else { ++ } else if (bp->dev->flags & IFF_MULTICAST) { + u32 mask = 0; + + bnxt_mc_list_updated(bp, &mask); +@@ -8643,6 +8661,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) + if (rc) + goto err_out; + ++skip_rx_mask: + rc = bnxt_hwrm_set_coal(bp); + if (rc) + netdev_warn(bp->dev, "HWRM set 
coalescing failure rc: %x\n", +@@ -9830,8 +9849,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) + resc_reinit = true; + if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE) + fw_reset = true; +- else if (bp->fw_health && !bp->fw_health->status_reliable) +- bnxt_try_map_fw_health_reg(bp); ++ else ++ bnxt_remap_fw_health_regs(bp); + + if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) { + netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n"); +@@ -10310,13 +10329,15 @@ int bnxt_half_open_nic(struct bnxt *bp) + goto half_open_err; + } + +- rc = bnxt_alloc_mem(bp, false); ++ rc = bnxt_alloc_mem(bp, true); + if (rc) { + netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); + goto half_open_err; + } +- rc = bnxt_init_nic(bp, false); ++ set_bit(BNXT_STATE_HALF_OPEN, &bp->state); ++ rc = bnxt_init_nic(bp, true); + if (rc) { ++ clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); + netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); + goto half_open_err; + } +@@ -10324,7 +10345,7 @@ int bnxt_half_open_nic(struct bnxt *bp) + + half_open_err: + bnxt_free_skbs(bp); +- bnxt_free_mem(bp, false); ++ bnxt_free_mem(bp, true); + dev_close(bp->dev); + return rc; + } +@@ -10334,9 +10355,10 @@ half_open_err: + */ + void bnxt_half_close_nic(struct bnxt *bp) + { +- bnxt_hwrm_resource_free(bp, false, false); ++ bnxt_hwrm_resource_free(bp, false, true); + bnxt_free_skbs(bp); +- bnxt_free_mem(bp, false); ++ bnxt_free_mem(bp, true); ++ clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); + } + + void bnxt_reenable_sriov(struct bnxt *bp) +@@ -10752,7 +10774,7 @@ static void bnxt_set_rx_mode(struct net_device *dev) + if (dev->flags & IFF_ALLMULTI) { + mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; + vnic->mc_list_count = 0; +- } else { ++ } else if (dev->flags & IFF_MULTICAST) { + mc_update = bnxt_mc_list_updated(bp, &mask); + } + +@@ -10820,9 +10842,10 @@ skip_uc: + !bnxt_promisc_ok(bp)) + vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; + rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); +- if (rc && vnic->mc_list_count) { ++ if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) { + netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n", + rc); ++ vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; + vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; + vnic->mc_list_count = 0; + rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h +index 6bacd5fae6ba5..2846d14756671 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h +@@ -1919,6 +1919,7 @@ struct bnxt { + #define BNXT_STATE_RECOVER 12 + #define BNXT_STATE_FW_NON_FATAL_COND 13 + #define BNXT_STATE_FW_ACTIVATE_RESET 14 ++#define BNXT_STATE_HALF_OPEN 15 /* For offline ethtool tests */ + + #define BNXT_NO_FW_ACCESS(bp) \ + (test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \ +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +index 951c4c569a9b3..61e0373079316 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +@@ -366,6 +366,16 @@ bnxt_dl_livepatch_report_err(struct bnxt *bp, struct netlink_ext_ack *extack, + } + } + ++/* Live patch status in NVM */ ++#define BNXT_LIVEPATCH_NOT_INSTALLED 0 ++#define BNXT_LIVEPATCH_INSTALLED FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL ++#define BNXT_LIVEPATCH_REMOVED 
FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE ++#define BNXT_LIVEPATCH_MASK (FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL | \ ++ FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE) ++#define BNXT_LIVEPATCH_ACTIVATED BNXT_LIVEPATCH_MASK ++ ++#define BNXT_LIVEPATCH_STATE(flags) ((flags) & BNXT_LIVEPATCH_MASK) ++ + static int + bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack) + { +@@ -373,8 +383,9 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack) + struct hwrm_fw_livepatch_query_input *query_req; + struct hwrm_fw_livepatch_output *patch_resp; + struct hwrm_fw_livepatch_input *patch_req; ++ u16 flags, live_patch_state; ++ bool activated = false; + u32 installed = 0; +- u16 flags; + u8 target; + int rc; + +@@ -393,7 +404,6 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack) + hwrm_req_drop(bp, query_req); + return rc; + } +- patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE; + patch_req->loadtype = FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL; + patch_resp = hwrm_req_hold(bp, patch_req); + +@@ -406,12 +416,20 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack) + } + + flags = le16_to_cpu(query_resp->status_flags); +- if (~flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL) ++ live_patch_state = BNXT_LIVEPATCH_STATE(flags); ++ ++ if (live_patch_state == BNXT_LIVEPATCH_NOT_INSTALLED) + continue; +- if ((flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE) && +- !strncmp(query_resp->active_ver, query_resp->install_ver, +- sizeof(query_resp->active_ver))) ++ ++ if (live_patch_state == BNXT_LIVEPATCH_ACTIVATED) { ++ activated = true; + continue; ++ } ++ ++ if (live_patch_state == BNXT_LIVEPATCH_INSTALLED) ++ patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE; ++ else if (live_patch_state == BNXT_LIVEPATCH_REMOVED) ++ patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE; + + patch_req->fw_target = target; + rc = hwrm_req_send(bp, patch_req); +@@ -423,8 +441,13 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack) + } + + if (!rc && !installed) { +- NL_SET_ERR_MSG_MOD(extack, "No live patches found"); +- rc = -ENOENT; ++ if (activated) { ++ NL_SET_ERR_MSG_MOD(extack, "Live patch already activated"); ++ rc = -EEXIST; ++ } else { ++ NL_SET_ERR_MSG_MOD(extack, "No live patches found"); ++ rc = -ENOENT; ++ } + } + hwrm_req_drop(bp, query_req); + hwrm_req_drop(bp, patch_req); +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +index 7307df49c1313..f147ad5a65315 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +@@ -25,6 +25,7 @@ + #include "bnxt_hsi.h" + #include "bnxt.h" + #include "bnxt_hwrm.h" ++#include "bnxt_ulp.h" + #include "bnxt_xdp.h" + #include "bnxt_ptp.h" + #include "bnxt_ethtool.h" +@@ -1944,6 +1945,9 @@ static int bnxt_get_fecparam(struct net_device *dev, + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE: + fec->active_fec |= ETHTOOL_FEC_LLRS; + break; ++ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE: ++ fec->active_fec |= ETHTOOL_FEC_OFF; ++ break; + } + return 0; + } +@@ -3429,7 +3433,7 @@ static int bnxt_run_loopback(struct bnxt *bp) + if (!skb) + return -ENOMEM; + data = skb_put(skb, pkt_size); +- eth_broadcast_addr(data); ++ ether_addr_copy(&data[i], bp->dev->dev_addr); + i += ETH_ALEN; + ether_addr_copy(&data[i], bp->dev->dev_addr); + i += ETH_ALEN; +@@ -3523,9 +3527,12 @@ static void bnxt_self_test(struct 
net_device *dev, struct ethtool_test *etest, + if (!offline) { + bnxt_run_fw_tests(bp, test_mask, &test_results); + } else { +- rc = bnxt_close_nic(bp, false, false); +- if (rc) ++ bnxt_ulp_stop(bp); ++ rc = bnxt_close_nic(bp, true, false); ++ if (rc) { ++ bnxt_ulp_start(bp, rc); + return; ++ } + bnxt_run_fw_tests(bp, test_mask, &test_results); + + buf[BNXT_MACLPBK_TEST_IDX] = 1; +@@ -3535,6 +3542,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, + if (rc) { + bnxt_hwrm_mac_loopback(bp, false); + etest->flags |= ETH_TEST_FL_FAILED; ++ bnxt_ulp_start(bp, rc); + return; + } + if (bnxt_run_loopback(bp)) +@@ -3560,7 +3568,8 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, + } + bnxt_hwrm_phy_loopback(bp, false, false); + bnxt_half_close_nic(bp); +- rc = bnxt_open_nic(bp, false, true); ++ rc = bnxt_open_nic(bp, true, true); ++ bnxt_ulp_start(bp, rc); + } + if (rc || bnxt_test_irq(bp)) { + buf[BNXT_IRQ_TEST_IDX] = 1; +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c +index 8171f4912fa01..3a0eeb3737767 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c +@@ -595,18 +595,24 @@ timeout_abort: + + /* Last byte of resp contains valid bit */ + valid = ((u8 *)ctx->resp) + len - 1; +- for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { ++ for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; ) { + /* make sure we read from updated DMA memory */ + dma_rmb(); + if (*valid) + break; +- usleep_range(1, 5); ++ if (j < 10) { ++ udelay(1); ++ j++; ++ } else { ++ usleep_range(20, 30); ++ j += 20; ++ } + } + + if (j >= HWRM_VALID_BIT_DELAY_USEC) { + if (!(ctx->flags & BNXT_HWRM_CTX_SILENT)) + netdev_err(bp->dev, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n", +- hwrm_total_timeout(i), ++ hwrm_total_timeout(i) + j, + le16_to_cpu(ctx->req->req_type), + le16_to_cpu(ctx->req->seq_id), len, + *valid); +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h +index 9a9fc4e8041b6..380ef69afb51b 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h +@@ -94,7 +94,7 @@ static inline unsigned int hwrm_total_timeout(unsigned int n) + } + + +-#define HWRM_VALID_BIT_DELAY_USEC 150 ++#define HWRM_VALID_BIT_DELAY_USEC 50000 + + static inline bool bnxt_cfa_hwrm_message(u16 req_type) + { +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c +index d5d33325a413e..1c6bc69197a53 100644 +--- a/drivers/net/ethernet/ibm/ibmvnic.c ++++ b/drivers/net/ethernet/ibm/ibmvnic.c +@@ -5919,10 +5919,14 @@ static ssize_t failover_store(struct device *dev, struct device_attribute *attr, + be64_to_cpu(session_token)); + rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, + H_SESSION_ERR_DETECTED, session_token, 0, 0); +- if (rc) ++ if (rc) { + netdev_err(netdev, + "H_VIOCTL initiated failover failed, rc %ld\n", + rc); ++ goto last_resort; ++ } ++ ++ return count; + + last_resort: + netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n"); +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c +index d3af1457fa0dc..1eddb99c4e9e1 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c +@@ -5372,15 +5372,7 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, + /* There is no need to 
reset BW when mqprio mode is on. */
+ 	if (pf->flags & I40E_FLAG_TC_MQPRIO)
+ 		return 0;
+-
+-	if (!vsi->mqprio_qopt.qopt.hw) {
+-		if (pf->flags & I40E_FLAG_DCB_ENABLED)
+-			goto skip_reset;
+-
+-		if (IS_ENABLED(CONFIG_I40E_DCB) &&
+-		    i40e_dcb_hw_get_num_tc(&pf->hw) == 1)
+-			goto skip_reset;
+-
++	if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
+ 		ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
+ 		if (ret)
+ 			dev_info(&pf->pdev->dev,
+@@ -5388,8 +5380,6 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
+ 				 vsi->seid);
+ 			return ret;
+ 		}
+-
+-skip_reset:
+ 	memset(&bw_data, 0, sizeof(bw_data));
+ 	bw_data.tc_valid_bits = enabled_tc;
+ 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index b3e1fc6a0a8eb..b067dd9c71e78 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -280,7 +280,6 @@ enum ice_pf_state {
+ 	ICE_VFLR_EVENT_PENDING,
+ 	ICE_FLTR_OVERFLOW_PROMISC,
+ 	ICE_VF_DIS,
+-	ICE_VF_DEINIT_IN_PROGRESS,
+ 	ICE_CFG_BUSY,
+ 	ICE_SERVICE_SCHED,
+ 	ICE_SERVICE_DIS,
+diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
+index e9a0159cb8b92..ec8c980f73421 100644
+--- a/drivers/net/ethernet/intel/ice/ice_common.c
++++ b/drivers/net/ethernet/intel/ice/ice_common.c
+@@ -3319,7 +3319,7 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
+ 
+ 	if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
+ 	    !ice_fw_supports_report_dflt_cfg(hw)) {
+-		struct ice_link_default_override_tlv tlv;
++		struct ice_link_default_override_tlv tlv = { 0 };
+ 
+ 		status = ice_get_link_default_override(&tlv, pi);
+ 		if (status)
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 5b4be432b60ce..8ee778aaa8000 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -1772,7 +1772,9 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
+ 			 * reset, so print the event prior to reset.
+ 			 */
+ 			ice_print_vf_rx_mdd_event(vf);
++			mutex_lock(&pf->vf[i].cfg_lock);
+ 			ice_reset_vf(&pf->vf[i], false);
++			mutex_unlock(&pf->vf[i].cfg_lock);
+ 		}
+ 	}
+ }
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
+index 442b031b0edc0..fdb9c4b367588 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
++++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
+@@ -1121,9 +1121,12 @@ exit:
+ static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
+ {
+ 	struct timespec64 now, then;
++	int ret;
+ 
+ 	then = ns_to_timespec64(delta);
+-	ice_ptp_gettimex64(info, &now, NULL);
++	ret = ice_ptp_gettimex64(info, &now, NULL);
++	if (ret)
++		return ret;
+ 	now = timespec64_add(now, then);
+ 
+ 	return ice_ptp_settime64(info, (const struct timespec64 *)&now);
+diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+index 25cca5c4ae575..275a99f62b285 100644
+--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+@@ -711,7 +711,7 @@ ice_tc_set_port(struct flow_match_ports match,
+ 			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT;
+ 		else
+ 			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;
+-		fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;
++
+ 		headers->l4_key.dst_port = match.key->dst;
+ 		headers->l4_mask.dst_port = match.mask->dst;
+ 	}
+@@ -720,7 +720,7 @@ ice_tc_set_port(struct flow_match_ports match,
+ 			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT;
+ 		else
+ 			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;
+-		fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;
++
+ 		headers->l4_key.src_port = match.key->src;
+ 		headers->l4_mask.src_port = match.mask->src;
+ 	}
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+index 6427e7ec93de6..a12cc305c4619 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+@@ -617,8 +617,6 @@ void ice_free_vfs(struct ice_pf *pf)
+ 	struct ice_hw *hw = &pf->hw;
+ 	unsigned int tmp, i;
+ 
+-	set_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
+-
+ 	if (!pf->vf)
+ 		return;
+ 
+@@ -636,22 +634,26 @@ void ice_free_vfs(struct ice_pf *pf)
+ 	else
+ 		dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
+ 
+-	/* Avoid wait time by stopping all VFs at the same time */
+-	ice_for_each_vf(pf, i)
+-		ice_dis_vf_qs(&pf->vf[i]);
+-
+ 	tmp = pf->num_alloc_vfs;
+ 	pf->num_qps_per_vf = 0;
+ 	pf->num_alloc_vfs = 0;
+ 	for (i = 0; i < tmp; i++) {
+-		if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
++		struct ice_vf *vf = &pf->vf[i];
++
++		mutex_lock(&vf->cfg_lock);
++
++		ice_dis_vf_qs(vf);
++
++		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ 			/* disable VF qp mappings and set VF disable state */
+-			ice_dis_vf_mappings(&pf->vf[i]);
+-			set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
+-			ice_free_vf_res(&pf->vf[i]);
++			ice_dis_vf_mappings(vf);
++			set_bit(ICE_VF_STATE_DIS, vf->vf_states);
++			ice_free_vf_res(vf);
+ 		}
+ 
+-		mutex_destroy(&pf->vf[i].cfg_lock);
++		mutex_unlock(&vf->cfg_lock);
++
++		mutex_destroy(&vf->cfg_lock);
+ 	}
+ 
+ 	if (ice_sriov_free_msix_res(pf))
+@@ -687,7 +689,6 @@ void ice_free_vfs(struct ice_pf *pf)
+ 			 i);
+ 
+ 	clear_bit(ICE_VF_DIS, pf->state);
+-	clear_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
+ 	clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
+ }
+ 
+@@ -1613,6 +1614,8 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
+ 	ice_for_each_vf(pf, v) {
+ 		vf = &pf->vf[v];
+ 
++		mutex_lock(&vf->cfg_lock);
++
+ 		vf->driver_caps = 0;
+ 
ice_vc_set_default_allowlist(vf); + +@@ -1627,6 +1630,8 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) + ice_vf_pre_vsi_rebuild(vf); + ice_vf_rebuild_vsi(vf); + ice_vf_post_vsi_rebuild(vf); ++ ++ mutex_unlock(&vf->cfg_lock); + } + + if (ice_is_eswitch_mode_switchdev(pf)) +@@ -1677,6 +1682,8 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) + u32 reg; + int i; + ++ lockdep_assert_held(&vf->cfg_lock); ++ + dev = ice_pf_to_dev(pf); + + if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) { +@@ -2176,9 +2183,12 @@ void ice_process_vflr_event(struct ice_pf *pf) + bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; + /* read GLGEN_VFLRSTAT register to find out the flr VFs */ + reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx)); +- if (reg & BIT(bit_idx)) ++ if (reg & BIT(bit_idx)) { + /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */ ++ mutex_lock(&vf->cfg_lock); + ice_reset_vf(vf, true); ++ mutex_unlock(&vf->cfg_lock); ++ } + } + } + +@@ -2255,7 +2265,9 @@ ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) + if (!vf) + return; + ++ mutex_lock(&vf->cfg_lock); + ice_vc_reset_vf(vf); ++ mutex_unlock(&vf->cfg_lock); + } + + /** +@@ -4651,10 +4663,6 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) + struct device *dev; + int err = 0; + +- /* if de-init is underway, don't process messages from VF */ +- if (test_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state)) +- return; +- + dev = ice_pf_to_dev(pf); + if (ice_validate_vf_id(pf, vf_id)) { + err = -EINVAL; +diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c +index bb14fa2241a36..0636783f7bc03 100644 +--- a/drivers/net/ethernet/marvell/mv643xx_eth.c ++++ b/drivers/net/ethernet/marvell/mv643xx_eth.c +@@ -2700,6 +2700,16 @@ MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids); + + static struct platform_device *port_platdev[3]; + ++static void mv643xx_eth_shared_of_remove(void) ++{ ++ int n; ++ ++ for (n = 0; n < 3; n++) { ++ platform_device_del(port_platdev[n]); ++ port_platdev[n] = NULL; ++ } ++} ++ + static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, + struct device_node *pnp) + { +@@ -2736,7 +2746,9 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, + return -EINVAL; + } + +- of_get_mac_address(pnp, ppd.mac_addr); ++ ret = of_get_mac_address(pnp, ppd.mac_addr); ++ if (ret) ++ return ret; + + mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size); + mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr); +@@ -2800,21 +2812,13 @@ static int mv643xx_eth_shared_of_probe(struct platform_device *pdev) + ret = mv643xx_eth_shared_of_add_port(pdev, pnp); + if (ret) { + of_node_put(pnp); ++ mv643xx_eth_shared_of_remove(); + return ret; + } + } + return 0; + } + +-static void mv643xx_eth_shared_of_remove(void) +-{ +- int n; +- +- for (n = 0; n < 3; n++) { +- platform_device_del(port_platdev[n]); +- port_platdev[n] = NULL; +- } +-} + #else + static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev) + { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c +index 60952b33b5688..d2333310b56fe 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c +@@ -60,37 +60,31 @@ static int parse_tunnel(struct mlx5e_priv *priv, + void *headers_v) + { + struct flow_rule *rule = flow_cls_offload_flow_rule(f); +- struct 
flow_match_enc_keyid enc_keyid; + struct flow_match_mpls match; + void *misc2_c; + void *misc2_v; + +- misc2_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, +- misc_parameters_2); +- misc2_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, +- misc_parameters_2); +- +- if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) +- return 0; +- +- if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) +- return 0; +- +- flow_rule_match_enc_keyid(rule, &enc_keyid); +- +- if (!enc_keyid.mask->keyid) +- return 0; +- + if (!MLX5_CAP_ETH(priv->mdev, tunnel_stateless_mpls_over_udp) && + !(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & MLX5_FLEX_PROTO_CW_MPLS_UDP)) + return -EOPNOTSUPP; + ++ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) ++ return -EOPNOTSUPP; ++ ++ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) ++ return 0; ++ + flow_rule_match_mpls(rule, &match); + + /* Only support matching the first LSE */ + if (match.mask->used_lses != 1) + return -EOPNOTSUPP; + ++ misc2_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, ++ misc_parameters_2); ++ misc2_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, ++ misc_parameters_2); ++ + MLX5_SET(fte_match_set_misc2, misc2_c, + outer_first_mpls_over_udp.mpls_label, + match.mask->ls[0].mpls_label); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +index c2ea5fad48ddf..58c72142804a5 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +@@ -1752,7 +1752,7 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev, + if (size_read < 0) { + netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n", + __func__, size_read); +- return 0; ++ return size_read; + } + + i += size_read; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +index bf25d0aa74c3b..ea0968ea88d6a 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +@@ -1348,7 +1348,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, + } + + /* True when explicitly set via priv flag, or XDP prog is loaded */ +- if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)) ++ if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state) || ++ get_cqe_tls_offload(cqe)) + goto csum_unnecessary; + + /* CQE csum doesn't cover padding octets in short ethernet +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +index 8c9163d2c6468..08a75654f5f18 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +@@ -334,6 +334,7 @@ void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest, + netdev_info(ndev, "\t[%d] %s start..\n", i, st.name); + buf[count] = st.st_func(priv); + netdev_info(ndev, "\t[%d] %s end: result(%lld)\n", i, st.name, buf[count]); ++ count++; + } + + mutex_unlock(&priv->state_lock); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index eae37934cdf70..308733cbaf775 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -3427,6 +3427,18 @@ actions_match_supported(struct mlx5e_priv *priv, + return false; + } + ++ if (!(~actions & ++ (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | 
MLX5_FLOW_CONTEXT_ACTION_DROP))) { ++ NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action"); ++ return false; ++ } ++ ++ if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR && ++ actions & MLX5_FLOW_CONTEXT_ACTION_DROP) { ++ NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported"); ++ return false; ++ } ++ + if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR && + actions & MLX5_FLOW_CONTEXT_ACTION_DROP) { + NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported"); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index ccb66428aeb5b..52b973e244189 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -2838,10 +2838,6 @@ bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw) + if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source)) + return false; + +- if (mlx5_core_is_ecpf_esw_manager(esw->dev) || +- mlx5_ecpf_vport_exists(esw->dev)) +- return false; +- + return true; + } + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index 386ab9a2d490f..4f6b010726998 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -2073,6 +2073,8 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle) + fte->node.del_hw_func = NULL; + up_write_ref_node(&fte->node, false); + tree_put_node(&fte->node, false); ++ } else { ++ up_write_ref_node(&fte->node, false); + } + kfree(handle); + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c +index df58cba37930a..1e8ec4f236b28 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c +@@ -121,6 +121,9 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains) + + u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains) + { ++ if (!mlx5_chains_prios_supported(chains)) ++ return 1; ++ + if (mlx5_chains_ignore_flow_level_supported(chains)) + return UINT_MAX; + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c +index 6e381111f1d2f..c3861c69521c2 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c +@@ -510,7 +510,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx) + + /* Check log_max_qp from HCA caps to set in current profile */ + if (prof->log_max_qp == LOG_MAX_SUPPORTED_QPS) { +- prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp); ++ prof->log_max_qp = min_t(u8, 17, MLX5_CAP_GEN_MAX(dev, log_max_qp)); + } else if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) { + mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n", + prof->log_max_qp, +@@ -1796,10 +1796,12 @@ static const struct pci_device_id mlx5_core_pci_table[] = { + { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */ + { PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */ + { PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */ ++ { PCI_VDEVICE(MELLANOX, 0x1023) }, /* ConnectX-8 */ + { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */ + { PCI_VDEVICE(MELLANOX, 0xa2d3), 
MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */ + { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */ + { PCI_VDEVICE(MELLANOX, 0xa2dc) }, /* BlueField-3 integrated ConnectX-7 network controller */ ++ { PCI_VDEVICE(MELLANOX, 0xa2df) }, /* BlueField-4 integrated ConnectX-8 network controller */ + { 0, } + }; + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c +index 7f6fd9c5e371b..e289cfdbce075 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c +@@ -4,7 +4,6 @@ + #include "dr_types.h" + + #define DR_ICM_MODIFY_HDR_ALIGN_BASE 64 +-#define DR_ICM_SYNC_THRESHOLD_POOL (64 * 1024 * 1024) + + struct mlx5dr_icm_pool { + enum mlx5dr_icm_type icm_type; +@@ -136,37 +135,35 @@ static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr) + kvfree(icm_mr); + } + +-static int dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk) ++static int dr_icm_buddy_get_ste_size(struct mlx5dr_icm_buddy_mem *buddy) + { +- chunk->ste_arr = kvzalloc(chunk->num_of_entries * +- sizeof(chunk->ste_arr[0]), GFP_KERNEL); +- if (!chunk->ste_arr) +- return -ENOMEM; +- +- chunk->hw_ste_arr = kvzalloc(chunk->num_of_entries * +- DR_STE_SIZE_REDUCED, GFP_KERNEL); +- if (!chunk->hw_ste_arr) +- goto out_free_ste_arr; +- +- chunk->miss_list = kvmalloc(chunk->num_of_entries * +- sizeof(chunk->miss_list[0]), GFP_KERNEL); +- if (!chunk->miss_list) +- goto out_free_hw_ste_arr; ++ /* We support only one type of STE size, both for ConnectX-5 and later ++ * devices. Once the support for match STE which has a larger tag is ++ * added (32B instead of 16B), the STE size for devices later than ++ * ConnectX-5 needs to account for that. ++ */ ++ return DR_STE_SIZE_REDUCED; ++} + +- return 0; ++static void dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk, int offset) ++{ ++ struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem; ++ int index = offset / DR_STE_SIZE; + +-out_free_hw_ste_arr: +- kvfree(chunk->hw_ste_arr); +-out_free_ste_arr: +- kvfree(chunk->ste_arr); +- return -ENOMEM; ++ chunk->ste_arr = &buddy->ste_arr[index]; ++ chunk->miss_list = &buddy->miss_list[index]; ++ chunk->hw_ste_arr = buddy->hw_ste_arr + ++ index * dr_icm_buddy_get_ste_size(buddy); + } + + static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk) + { +- kvfree(chunk->miss_list); +- kvfree(chunk->hw_ste_arr); +- kvfree(chunk->ste_arr); ++ struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem; ++ ++ memset(chunk->hw_ste_arr, 0, ++ chunk->num_of_entries * dr_icm_buddy_get_ste_size(buddy)); ++ memset(chunk->ste_arr, 0, ++ chunk->num_of_entries * sizeof(chunk->ste_arr[0])); + } + + static enum mlx5dr_icm_type +@@ -189,6 +186,44 @@ static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk, + kvfree(chunk); + } + ++static int dr_icm_buddy_init_ste_cache(struct mlx5dr_icm_buddy_mem *buddy) ++{ ++ int num_of_entries = ++ mlx5dr_icm_pool_chunk_size_to_entries(buddy->pool->max_log_chunk_sz); ++ ++ buddy->ste_arr = kvcalloc(num_of_entries, ++ sizeof(struct mlx5dr_ste), GFP_KERNEL); ++ if (!buddy->ste_arr) ++ return -ENOMEM; ++ ++ /* Preallocate full STE size on non-ConnectX-5 devices since ++ * we need to support both full and reduced with the same cache. 
++ */ ++ buddy->hw_ste_arr = kvcalloc(num_of_entries, ++ dr_icm_buddy_get_ste_size(buddy), GFP_KERNEL); ++ if (!buddy->hw_ste_arr) ++ goto free_ste_arr; ++ ++ buddy->miss_list = kvmalloc(num_of_entries * sizeof(struct list_head), GFP_KERNEL); ++ if (!buddy->miss_list) ++ goto free_hw_ste_arr; ++ ++ return 0; ++ ++free_hw_ste_arr: ++ kvfree(buddy->hw_ste_arr); ++free_ste_arr: ++ kvfree(buddy->ste_arr); ++ return -ENOMEM; ++} ++ ++static void dr_icm_buddy_cleanup_ste_cache(struct mlx5dr_icm_buddy_mem *buddy) ++{ ++ kvfree(buddy->ste_arr); ++ kvfree(buddy->hw_ste_arr); ++ kvfree(buddy->miss_list); ++} ++ + static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool) + { + struct mlx5dr_icm_buddy_mem *buddy; +@@ -208,11 +243,19 @@ static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool) + buddy->icm_mr = icm_mr; + buddy->pool = pool; + ++ if (pool->icm_type == DR_ICM_TYPE_STE) { ++ /* Reduce allocations by preallocating and reusing the STE structures */ ++ if (dr_icm_buddy_init_ste_cache(buddy)) ++ goto err_cleanup_buddy; ++ } ++ + /* add it to the -start- of the list in order to search in it first */ + list_add(&buddy->list_node, &pool->buddy_mem_list); + + return 0; + ++err_cleanup_buddy: ++ mlx5dr_buddy_cleanup(buddy); + err_free_buddy: + kvfree(buddy); + free_mr: +@@ -234,6 +277,9 @@ static void dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem *buddy) + + mlx5dr_buddy_cleanup(buddy); + ++ if (buddy->pool->icm_type == DR_ICM_TYPE_STE) ++ dr_icm_buddy_cleanup_ste_cache(buddy); ++ + kvfree(buddy); + } + +@@ -261,34 +307,30 @@ dr_icm_chunk_create(struct mlx5dr_icm_pool *pool, + chunk->byte_size = + mlx5dr_icm_pool_chunk_size_to_byte(chunk_size, pool->icm_type); + chunk->seg = seg; ++ chunk->buddy_mem = buddy_mem_pool; + +- if (pool->icm_type == DR_ICM_TYPE_STE && dr_icm_chunk_ste_init(chunk)) { +- mlx5dr_err(pool->dmn, +- "Failed to init ste arrays (order: %d)\n", +- chunk_size); +- goto out_free_chunk; +- } ++ if (pool->icm_type == DR_ICM_TYPE_STE) ++ dr_icm_chunk_ste_init(chunk, offset); + + buddy_mem_pool->used_memory += chunk->byte_size; +- chunk->buddy_mem = buddy_mem_pool; + INIT_LIST_HEAD(&chunk->chunk_list); + + /* chunk now is part of the used_list */ + list_add_tail(&chunk->chunk_list, &buddy_mem_pool->used_list); + + return chunk; +- +-out_free_chunk: +- kvfree(chunk); +- return NULL; + } + + static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool) + { +- if (pool->hot_memory_size > DR_ICM_SYNC_THRESHOLD_POOL) +- return true; ++ int allow_hot_size; ++ ++ /* sync when hot memory reaches half of the pool size */ ++ allow_hot_size = ++ mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz, ++ pool->icm_type) / 2; + +- return false; ++ return pool->hot_memory_size > allow_hot_size; + } + + static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c +index 3d0cdc36a91ab..01213045a8a84 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c +@@ -13,18 +13,6 @@ static bool dr_mask_is_dmac_set(struct mlx5dr_match_spec *spec) + return (spec->dmac_47_16 || spec->dmac_15_0); + } + +-static bool dr_mask_is_src_addr_set(struct mlx5dr_match_spec *spec) +-{ +- return (spec->src_ip_127_96 || spec->src_ip_95_64 || +- spec->src_ip_63_32 || spec->src_ip_31_0); +-} +- +-static bool dr_mask_is_dst_addr_set(struct mlx5dr_match_spec *spec) +-{ 
+- return (spec->dst_ip_127_96 || spec->dst_ip_95_64 || +- spec->dst_ip_63_32 || spec->dst_ip_31_0); +-} +- + static bool dr_mask_is_l3_base_set(struct mlx5dr_match_spec *spec) + { + return (spec->ip_protocol || spec->frag || spec->tcp_flags || +@@ -480,11 +468,11 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, + &mask, inner, rx); + + if (outer_ipv == DR_RULE_IPV6) { +- if (dr_mask_is_dst_addr_set(&mask.outer)) ++ if (DR_MASK_IS_DST_IP_SET(&mask.outer)) + mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++], + &mask, inner, rx); + +- if (dr_mask_is_src_addr_set(&mask.outer)) ++ if (DR_MASK_IS_SRC_IP_SET(&mask.outer)) + mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++], + &mask, inner, rx); + +@@ -580,11 +568,11 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, + &mask, inner, rx); + + if (inner_ipv == DR_RULE_IPV6) { +- if (dr_mask_is_dst_addr_set(&mask.inner)) ++ if (DR_MASK_IS_DST_IP_SET(&mask.inner)) + mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++], + &mask, inner, rx); + +- if (dr_mask_is_src_addr_set(&mask.inner)) ++ if (DR_MASK_IS_SRC_IP_SET(&mask.inner)) + mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++], + &mask, inner, rx); + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +index 219a5474a8a46..7e711b2037b5b 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +@@ -602,12 +602,34 @@ int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx, + used_hw_action_num); + } + ++static int dr_ste_build_pre_check_spec(struct mlx5dr_domain *dmn, ++ struct mlx5dr_match_spec *spec) ++{ ++ if (spec->ip_version) { ++ if (spec->ip_version != 0xf) { ++ mlx5dr_err(dmn, ++ "Partial ip_version mask with src/dst IP is not supported\n"); ++ return -EINVAL; ++ } ++ } else if (spec->ethertype != 0xffff && ++ (DR_MASK_IS_SRC_IP_SET(spec) || DR_MASK_IS_DST_IP_SET(spec))) { ++ mlx5dr_err(dmn, ++ "Partial/no ethertype mask with src/dst IP is not supported\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ + int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn, + u8 match_criteria, + struct mlx5dr_match_param *mask, + struct mlx5dr_match_param *value) + { +- if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) { ++ if (value) ++ return 0; ++ ++ if (match_criteria & DR_MATCHER_CRITERIA_MISC) { + if (mask->misc.source_port && mask->misc.source_port != 0xffff) { + mlx5dr_err(dmn, + "Partial mask source_port is not supported\n"); +@@ -621,6 +643,14 @@ int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn, + } + } + ++ if ((match_criteria & DR_MATCHER_CRITERIA_OUTER) && ++ dr_ste_build_pre_check_spec(dmn, &mask->outer)) ++ return -EINVAL; ++ ++ if ((match_criteria & DR_MATCHER_CRITERIA_INNER) && ++ dr_ste_build_pre_check_spec(dmn, &mask->inner)) ++ return -EINVAL; ++ + return 0; + } + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +index 2333c2439c287..5f98db648e865 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +@@ -739,6 +739,16 @@ struct mlx5dr_match_param { + (_misc3)->icmpv4_code || \ + (_misc3)->icmpv4_header_data) + ++#define DR_MASK_IS_SRC_IP_SET(_spec) ((_spec)->src_ip_127_96 || \ ++ (_spec)->src_ip_95_64 || \ ++ (_spec)->src_ip_63_32 || \ ++ (_spec)->src_ip_31_0) ++ 
++#define DR_MASK_IS_DST_IP_SET(_spec) ((_spec)->dst_ip_127_96 || \ ++ (_spec)->dst_ip_95_64 || \ ++ (_spec)->dst_ip_63_32 || \ ++ (_spec)->dst_ip_31_0) ++ + struct mlx5dr_esw_caps { + u64 drop_icm_address_rx; + u64 drop_icm_address_tx; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +index 2632d5ae9bc0e..ac4651235b34c 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +@@ -222,7 +222,11 @@ static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst) + dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID; + } + +-#define MLX5_FLOW_CONTEXT_ACTION_MAX 32 ++/* We want to support a rule with 32 destinations, which means we need to ++ * account for 32 destinations plus usually a counter plus one more action ++ * for a multi-destination flow table. ++ */ ++#define MLX5_FLOW_CONTEXT_ACTION_MAX 34 + static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_table *ft, + struct mlx5_flow_group *group, +@@ -392,9 +396,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, + enum mlx5_flow_destination_type type = dst->dest_attr.type; + u32 id; + +- if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX || +- num_term_actions >= MLX5_FLOW_CONTEXT_ACTION_MAX) { +- err = -ENOSPC; ++ if (fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX || ++ num_term_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { ++ err = -EOPNOTSUPP; + goto free_actions; + } + +@@ -464,8 +468,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, + MLX5_FLOW_DESTINATION_TYPE_COUNTER) + continue; + +- if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { +- err = -ENOSPC; ++ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX || ++ fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { ++ err = -EOPNOTSUPP; + goto free_actions; + } + +@@ -485,14 +490,28 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, + params.match_sz = match_sz; + params.match_buf = (u64 *)fte->val; + if (num_term_actions == 1) { +- if (term_actions->reformat) ++ if (term_actions->reformat) { ++ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { ++ err = -EOPNOTSUPP; ++ goto free_actions; ++ } + actions[num_actions++] = term_actions->reformat; ++ } + ++ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { ++ err = -EOPNOTSUPP; ++ goto free_actions; ++ } + actions[num_actions++] = term_actions->dest; + } else if (num_term_actions > 1) { + bool ignore_flow_level = + !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL); + ++ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX || ++ fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { ++ err = -EOPNOTSUPP; ++ goto free_actions; ++ } + tmp_action = mlx5dr_action_create_mult_dest_tbl(domain, + term_actions, + num_term_actions, +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h +index c7c93131b762b..dfa223415fe24 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h +@@ -160,6 +160,11 @@ struct mlx5dr_icm_buddy_mem { + * sync_ste command sets them free. 
+ */ + struct list_head hot_list; ++ ++ /* Memory optimisation */ ++ struct mlx5dr_ste *ste_arr; ++ struct list_head *miss_list; ++ u8 *hw_ste_arr; + }; + + int mlx5dr_buddy_init(struct mlx5dr_icm_buddy_mem *buddy, +diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +index 0a326e04e6923..cb43651ea9ba8 100644 +--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c ++++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +@@ -922,8 +922,8 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev, + int port, bool mod) + { + struct nfp_flower_priv *priv = app->priv; +- int ida_idx = NFP_MAX_MAC_INDEX, err; + struct nfp_tun_offloaded_mac *entry; ++ int ida_idx = -1, err; + u16 nfp_mac_idx = 0; + + entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr); +@@ -997,7 +997,7 @@ err_remove_hash: + err_free_entry: + kfree(entry); + err_free_ida: +- if (ida_idx != NFP_MAX_MAC_INDEX) ++ if (ida_idx != -1) + ida_simple_remove(&priv->tun.mac_off_ids, ida_idx); + + return err; +diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c +index e7065c9a8e389..e8be35b1b6c96 100644 +--- a/drivers/net/ethernet/xilinx/ll_temac_main.c ++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c +@@ -1427,6 +1427,8 @@ static int temac_probe(struct platform_device *pdev) + lp->indirect_lock = devm_kmalloc(&pdev->dev, + sizeof(*lp->indirect_lock), + GFP_KERNEL); ++ if (!lp->indirect_lock) ++ return -ENOMEM; + spin_lock_init(lp->indirect_lock); + } + +diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c +index 5f4cd24a0241d..4eba5a91075c0 100644 +--- a/drivers/net/mdio/mdio-ipq4019.c ++++ b/drivers/net/mdio/mdio-ipq4019.c +@@ -200,7 +200,11 @@ static int ipq_mdio_reset(struct mii_bus *bus) + if (ret) + return ret; + +- return clk_prepare_enable(priv->mdio_clk); ++ ret = clk_prepare_enable(priv->mdio_clk); ++ if (ret == 0) ++ mdelay(10); ++ ++ return ret; + } + + static int ipq4019_mdio_probe(struct platform_device *pdev) +diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c +index eb3817d70f2b8..9b4dfa3001d6e 100644 +--- a/drivers/net/usb/cdc_ether.c ++++ b/drivers/net/usb/cdc_ether.c +@@ -583,6 +583,11 @@ static const struct usb_device_id products[] = { + .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \ + .bInterfaceProtocol = USB_CDC_PROTO_NONE + ++#define ZAURUS_FAKE_INTERFACE \ ++ .bInterfaceClass = USB_CLASS_COMM, \ ++ .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \ ++ .bInterfaceProtocol = USB_CDC_PROTO_NONE ++ + /* SA-1100 based Sharp Zaurus ("collie"), or compatible; + * wire-incompatible with true CDC Ethernet implementations. + * (And, it seems, needlessly so...) 
+@@ -636,6 +641,13 @@ static const struct usb_device_id products[] = { + .idProduct = 0x9032, /* SL-6000 */ + ZAURUS_MASTER_INTERFACE, + .driver_info = 0, ++}, { ++ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO ++ | USB_DEVICE_ID_MATCH_DEVICE, ++ .idVendor = 0x04DD, ++ .idProduct = 0x9032, /* SL-6000 */ ++ ZAURUS_FAKE_INTERFACE, ++ .driver_info = 0, + }, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, +diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c +index e303b522efb50..15f91d691bba3 100644 +--- a/drivers/net/usb/cdc_ncm.c ++++ b/drivers/net/usb/cdc_ncm.c +@@ -1715,10 +1715,10 @@ int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in) + { + struct sk_buff *skb; + struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; +- int len; ++ unsigned int len; + int nframes; + int x; +- int offset; ++ unsigned int offset; + union { + struct usb_cdc_ncm_ndp16 *ndp16; + struct usb_cdc_ncm_ndp32 *ndp32; +@@ -1790,8 +1790,8 @@ next_ndp: + break; + } + +- /* sanity checking */ +- if (((offset + len) > skb_in->len) || ++ /* sanity checking - watch out for integer wrap*/ ++ if ((offset > skb_in->len) || (len > skb_in->len - offset) || + (len > ctx->rx_max) || (len < ETH_HLEN)) { + netif_dbg(dev, rx_err, dev->net, + "invalid frame detected (ignored) offset[%u]=%u, length=%u, skb=%p\n", +diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c +index b658510cc9a42..5a53e63d33a60 100644 +--- a/drivers/net/usb/sr9700.c ++++ b/drivers/net/usb/sr9700.c +@@ -413,7 +413,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb) + /* ignore the CRC length */ + len = (skb->data[1] | (skb->data[2] << 8)) - 4; + +- if (len > ETH_FRAME_LEN) ++ if (len > ETH_FRAME_LEN || len > skb->len) + return 0; + + /* the last packet of current skb */ +diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c +index 8e717a0b559b3..7984f2157d222 100644 +--- a/drivers/net/usb/zaurus.c ++++ b/drivers/net/usb/zaurus.c +@@ -256,6 +256,11 @@ static const struct usb_device_id products [] = { + .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \ + .bInterfaceProtocol = USB_CDC_PROTO_NONE + ++#define ZAURUS_FAKE_INTERFACE \ ++ .bInterfaceClass = USB_CLASS_COMM, \ ++ .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \ ++ .bInterfaceProtocol = USB_CDC_PROTO_NONE ++ + /* SA-1100 based Sharp Zaurus ("collie"), or compatible. 
*/ + { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO +@@ -313,6 +318,13 @@ static const struct usb_device_id products [] = { + .idProduct = 0x9032, /* SL-6000 */ + ZAURUS_MASTER_INTERFACE, + .driver_info = ZAURUS_PXA_INFO, ++}, { ++ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO ++ | USB_DEVICE_ID_MATCH_DEVICE, ++ .idVendor = 0x04DD, ++ .idProduct = 0x9032, /* SL-6000 */ ++ ZAURUS_FAKE_INTERFACE, ++ .driver_info = (unsigned long)&bogus_mdlm_info, + }, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c +index 352766aa3122e..5785f6abf1945 100644 +--- a/drivers/nvme/host/core.c ++++ b/drivers/nvme/host/core.c +@@ -1936,7 +1936,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id) + if (blk_queue_is_zoned(ns->queue)) { + ret = nvme_revalidate_zones(ns); + if (ret && !nvme_first_scan(ns->disk)) +- goto out; ++ return ret; + } + + if (nvme_ns_head_multipath(ns->head)) { +@@ -1951,16 +1951,16 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id) + return 0; + + out_unfreeze: +- blk_mq_unfreeze_queue(ns->disk->queue); +-out: + /* + * If probing fails due an unsupported feature, hide the block device, + * but still allow other access. + */ + if (ret == -ENODEV) { + ns->disk->flags |= GENHD_FL_HIDDEN; ++ set_bit(NVME_NS_READY, &ns->flags); + ret = 0; + } ++ blk_mq_unfreeze_queue(ns->disk->queue); + return ret; + } + +diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c +index 23a38dcf0fc4d..9fd1602b539d9 100644 +--- a/drivers/nvmem/core.c ++++ b/drivers/nvmem/core.c +@@ -771,7 +771,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) + + if (config->wp_gpio) + nvmem->wp_gpio = config->wp_gpio; +- else ++ else if (!config->ignore_wp) + nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp", + GPIOD_OUT_HIGH); + if (IS_ERR(nvmem->wp_gpio)) { +diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c +index 357e9a293edf7..2a3bf82aa4e26 100644 +--- a/drivers/pci/controller/pci-mvebu.c ++++ b/drivers/pci/controller/pci-mvebu.c +@@ -1288,7 +1288,8 @@ static int mvebu_pcie_probe(struct platform_device *pdev) + * indirectly via kernel emulated PCI bridge driver. 
+	 */
+ 	mvebu_pcie_setup_hw(port);
+-	mvebu_pcie_set_local_dev_nr(port, 0);
++	mvebu_pcie_set_local_dev_nr(port, 1);
++	mvebu_pcie_set_local_bus_nr(port, 0);
+ 	}
+ 
+ 	pcie->nports = i;
+diff --git a/drivers/pinctrl/pinctrl-k210.c b/drivers/pinctrl/pinctrl-k210.c
+index 49e32684dbb25..ecab6bf63dc6d 100644
+--- a/drivers/pinctrl/pinctrl-k210.c
++++ b/drivers/pinctrl/pinctrl-k210.c
+@@ -482,7 +482,7 @@ static int k210_pinconf_get_drive(unsigned int max_strength_ua)
+ {
+ 	int i;
+ 
+-	for (i = K210_PC_DRIVE_MAX; i; i--) {
++	for (i = K210_PC_DRIVE_MAX; i >= 0; i--) {
+ 		if (k210_pinconf_drive_strength[i] <= max_strength_ua)
+ 			return i;
+ 	}
+@@ -527,7 +527,7 @@ static int k210_pinconf_set_param(struct pinctrl_dev *pctldev,
+ 	case PIN_CONFIG_BIAS_PULL_UP:
+ 		if (!arg)
+ 			return -EINVAL;
+-		val |= K210_PC_PD;
++		val |= K210_PC_PU;
+ 		break;
+ 	case PIN_CONFIG_DRIVE_STRENGTH:
+ 		arg *= 1000;
+diff --git a/drivers/platform/surface/surface3_power.c b/drivers/platform/surface/surface3_power.c
+index abac3eec565e8..444ec81ba02d7 100644
+--- a/drivers/platform/surface/surface3_power.c
++++ b/drivers/platform/surface/surface3_power.c
+@@ -232,14 +232,21 @@ static int mshw0011_bix(struct mshw0011_data *cdata, struct bix *bix)
+ 	}
+ 	bix->last_full_charg_capacity = ret;
+ 
+-	/* get serial number */
++	/*
++	 * Get serial number, on some devices (with unofficial replacement
++	 * battery?) reading any of the serial number range addresses gets
++	 * nacked in this case just leave the serial number empty.
++	 */
+ 	ret = i2c_smbus_read_i2c_block_data(client, MSHW0011_BAT0_REG_SERIAL_NO,
+ 					    sizeof(buf), buf);
+-	if (ret != sizeof(buf)) {
++	if (ret == -EREMOTEIO) {
++		/* no serial number available */
++	} else if (ret != sizeof(buf)) {
+ 		dev_err(&client->dev, "Error reading serial no: %d\n", ret);
+ 		return ret;
++	} else {
++		snprintf(bix->serial, ARRAY_SIZE(bix->serial), "%3pE%6pE", buf + 7, buf);
+ 	}
+-	snprintf(bix->serial, ARRAY_SIZE(bix->serial), "%3pE%6pE", buf + 7, buf);
+ 
+ 	/* get cycle count */
+ 	ret = i2c_smbus_read_word_data(client, MSHW0011_BAT0_REG_CYCLE_CNT);
+diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
+index cfa222c9bd5e7..78f31b61a2aac 100644
+--- a/drivers/spi/spi-zynq-qspi.c
++++ b/drivers/spi/spi-zynq-qspi.c
+@@ -570,6 +570,9 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
+ 
+ 	if (op->dummy.nbytes) {
+ 		tmpbuf = kzalloc(op->dummy.nbytes, GFP_KERNEL);
++		if (!tmpbuf)
++			return -ENOMEM;
++
+ 		memset(tmpbuf, 0xff, op->dummy.nbytes);
+ 		reinit_completion(&xqspi->data_completion);
+ 		xqspi->txbuf = tmpbuf;
+diff --git a/drivers/staging/fbtft/fb_st7789v.c b/drivers/staging/fbtft/fb_st7789v.c
+index abe9395a0aefd..861a154144e66 100644
+--- a/drivers/staging/fbtft/fb_st7789v.c
++++ b/drivers/staging/fbtft/fb_st7789v.c
+@@ -144,6 +144,8 @@ static int init_display(struct fbtft_par *par)
+ {
+ 	int rc;
+ 
++	par->fbtftops.reset(par);
++
+ 	rc = init_tearing_effect_line(par);
+ 	if (rc)
+ 		return rc;
+diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+index 8502b7d8df896..68f61a7389303 100644
+--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
++++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+@@ -404,6 +404,10 @@ static void int3400_notify(acpi_handle handle,
+ 	thermal_prop[3] = kasprintf(GFP_KERNEL, "EVENT=%d", therm_event);
+ 	thermal_prop[4] = NULL;
+ 	kobject_uevent_env(&priv->thermal->device.kobj, KOBJ_CHANGE, thermal_prop);
++	kfree(thermal_prop[0]);
++	kfree(thermal_prop[1]);
++
kfree(thermal_prop[2]); ++ kfree(thermal_prop[3]); + } + + static int int3400_thermal_get_temp(struct thermal_zone_device *thermal, +diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c +index 9c5211f2ea84c..2ec9eeaabac94 100644 +--- a/drivers/tty/n_gsm.c ++++ b/drivers/tty/n_gsm.c +@@ -439,7 +439,7 @@ static u8 gsm_encode_modem(const struct gsm_dlci *dlci) + modembits |= MDM_RTR; + if (dlci->modem_tx & TIOCM_RI) + modembits |= MDM_IC; +- if (dlci->modem_tx & TIOCM_CD) ++ if (dlci->modem_tx & TIOCM_CD || dlci->gsm->initiator) + modembits |= MDM_DV; + return modembits; + } +@@ -448,7 +448,7 @@ static u8 gsm_encode_modem(const struct gsm_dlci *dlci) + * gsm_print_packet - display a frame for debug + * @hdr: header to print before decode + * @addr: address EA from the frame +- * @cr: C/R bit from the frame ++ * @cr: C/R bit seen as initiator + * @control: control including PF bit + * @data: following data bytes + * @dlen: length of data +@@ -548,7 +548,7 @@ static int gsm_stuff_frame(const u8 *input, u8 *output, int len) + * gsm_send - send a control frame + * @gsm: our GSM mux + * @addr: address for control frame +- * @cr: command/response bit ++ * @cr: command/response bit seen as initiator + * @control: control byte including PF bit + * + * Format up and transmit a control frame. These do not go via the +@@ -563,11 +563,15 @@ static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control) + int len; + u8 cbuf[10]; + u8 ibuf[3]; ++ int ocr; ++ ++ /* toggle C/R coding if not initiator */ ++ ocr = cr ^ (gsm->initiator ? 0 : 1); + + switch (gsm->encoding) { + case 0: + cbuf[0] = GSM0_SOF; +- cbuf[1] = (addr << 2) | (cr << 1) | EA; ++ cbuf[1] = (addr << 2) | (ocr << 1) | EA; + cbuf[2] = control; + cbuf[3] = EA; /* Length of data = 0 */ + cbuf[4] = 0xFF - gsm_fcs_add_block(INIT_FCS, cbuf + 1, 3); +@@ -577,7 +581,7 @@ static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control) + case 1: + case 2: + /* Control frame + packing (but not frame stuffing) in mode 1 */ +- ibuf[0] = (addr << 2) | (cr << 1) | EA; ++ ibuf[0] = (addr << 2) | (ocr << 1) | EA; + ibuf[1] = control; + ibuf[2] = 0xFF - gsm_fcs_add_block(INIT_FCS, ibuf, 2); + /* Stuffing may double the size worst case */ +@@ -611,7 +615,7 @@ static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control) + + static inline void gsm_response(struct gsm_mux *gsm, int addr, int control) + { +- gsm_send(gsm, addr, 1, control); ++ gsm_send(gsm, addr, 0, control); + } + + /** +@@ -1017,25 +1021,25 @@ static void gsm_control_reply(struct gsm_mux *gsm, int cmd, const u8 *data, + * @tty: virtual tty bound to the DLCI + * @dlci: DLCI to affect + * @modem: modem bits (full EA) +- * @clen: command length ++ * @slen: number of signal octets + * + * Used when a modem control message or line state inline in adaption + * layer 2 is processed. Sort out the local modem state and throttles + */ + + static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci, +- u32 modem, int clen) ++ u32 modem, int slen) + { + int mlines = 0; + u8 brk = 0; + int fc; + +- /* The modem status command can either contain one octet (v.24 signals) +- or two octets (v.24 signals + break signals). The length field will +- either be 2 or 3 respectively. This is specified in section +- 5.4.6.3.7 of the 27.010 mux spec. */ ++ /* The modem status command can either contain one octet (V.24 signals) ++ * or two octets (V.24 signals + break signals). This is specified in ++ * section 5.4.6.3.7 of the 07.10 mux spec. 
++ */ + +- if (clen == 2) ++ if (slen == 1) + modem = modem & 0x7f; + else { + brk = modem & 0x7f; +@@ -1092,6 +1096,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen) + unsigned int brk = 0; + struct gsm_dlci *dlci; + int len = clen; ++ int slen; + const u8 *dp = data; + struct tty_struct *tty; + +@@ -1111,6 +1116,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen) + return; + dlci = gsm->dlci[addr]; + ++ slen = len; + while (gsm_read_ea(&modem, *dp++) == 0) { + len--; + if (len == 0) +@@ -1127,7 +1133,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen) + modem |= (brk & 0x7f); + } + tty = tty_port_tty_get(&dlci->port); +- gsm_process_modem(tty, dlci, modem, clen); ++ gsm_process_modem(tty, dlci, modem, slen); + if (tty) { + tty_wakeup(tty); + tty_kref_put(tty); +@@ -1451,6 +1457,9 @@ static void gsm_dlci_close(struct gsm_dlci *dlci) + if (dlci->addr != 0) { + tty_port_tty_hangup(&dlci->port, false); + kfifo_reset(&dlci->fifo); ++ /* Ensure that gsmtty_open() can return. */ ++ tty_port_set_initialized(&dlci->port, 0); ++ wake_up_interruptible(&dlci->port.open_wait); + } else + dlci->gsm->dead = true; + /* Unregister gsmtty driver,report gsmtty dev remove uevent for user */ +@@ -1514,7 +1523,7 @@ static void gsm_dlci_t1(struct timer_list *t) + dlci->mode = DLCI_MODE_ADM; + gsm_dlci_open(dlci); + } else { +- gsm_dlci_close(dlci); ++ gsm_dlci_begin_close(dlci); /* prevent half open link */ + } + + break; +@@ -1593,6 +1602,7 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen) + struct tty_struct *tty; + unsigned int modem = 0; + int len = clen; ++ int slen = 0; + + if (debug & 16) + pr_debug("%d bytes for tty\n", len); +@@ -1605,12 +1615,14 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen) + case 2: /* Asynchronous serial with line state in each frame */ + while (gsm_read_ea(&modem, *data++) == 0) { + len--; ++ slen++; + if (len == 0) + return; + } ++ slen++; + tty = tty_port_tty_get(port); + if (tty) { +- gsm_process_modem(tty, dlci, modem, clen); ++ gsm_process_modem(tty, dlci, modem, slen); + tty_kref_put(tty); + } + fallthrough; +@@ -1748,7 +1760,12 @@ static void gsm_dlci_release(struct gsm_dlci *dlci) + gsm_destroy_network(dlci); + mutex_unlock(&dlci->mutex); + +- tty_hangup(tty); ++ /* We cannot use tty_hangup() because in tty_kref_put() the tty ++ * driver assumes that the hangup queue is free and reuses it to ++ * queue release_one_tty() -> NULL pointer panic in ++ * process_one_work(). ++ */ ++ tty_vhangup(tty); + + tty_port_tty_set(&dlci->port, NULL); + tty_kref_put(tty); +@@ -1800,10 +1817,10 @@ static void gsm_queue(struct gsm_mux *gsm) + goto invalid; + + cr = gsm->address & 1; /* C/R bit */ ++ cr ^= gsm->initiator ? 
0 : 1; /* Flip so 1 always means command */ + + gsm_print_packet("<--", address, cr, gsm->control, gsm->buf, gsm->len); + +- cr ^= 1 - gsm->initiator; /* Flip so 1 always means command */ + dlci = gsm->dlci[address]; + + switch (gsm->control) { +@@ -3237,9 +3254,9 @@ static void gsmtty_throttle(struct tty_struct *tty) + if (dlci->state == DLCI_CLOSED) + return; + if (C_CRTSCTS(tty)) +- dlci->modem_tx &= ~TIOCM_DTR; ++ dlci->modem_tx &= ~TIOCM_RTS; + dlci->throttled = true; +- /* Send an MSC with DTR cleared */ ++ /* Send an MSC with RTS cleared */ + gsmtty_modem_update(dlci, 0); + } + +@@ -3249,9 +3266,9 @@ static void gsmtty_unthrottle(struct tty_struct *tty) + if (dlci->state == DLCI_CLOSED) + return; + if (C_CRTSCTS(tty)) +- dlci->modem_tx |= TIOCM_DTR; ++ dlci->modem_tx |= TIOCM_RTS; + dlci->throttled = false; +- /* Send an MSC with DTR set */ ++ /* Send an MSC with RTS set */ + gsmtty_modem_update(dlci, 0); + } + +diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c +index 64e7e6c8145f8..38d1c0748533c 100644 +--- a/drivers/tty/serial/sc16is7xx.c ++++ b/drivers/tty/serial/sc16is7xx.c +@@ -734,12 +734,15 @@ static irqreturn_t sc16is7xx_irq(int irq, void *dev_id) + static void sc16is7xx_tx_proc(struct kthread_work *ws) + { + struct uart_port *port = &(to_sc16is7xx_one(ws, tx_work)->port); ++ struct sc16is7xx_port *s = dev_get_drvdata(port->dev); + + if ((port->rs485.flags & SER_RS485_ENABLED) && + (port->rs485.delay_rts_before_send > 0)) + msleep(port->rs485.delay_rts_before_send); + ++ mutex_lock(&s->efr_lock); + sc16is7xx_handle_tx(port); ++ mutex_unlock(&s->efr_lock); + } + + static void sc16is7xx_reconf_rs485(struct uart_port *port) +diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h +index 37185eb66ae4c..f76c30083fbc9 100644 +--- a/drivers/usb/dwc2/core.h ++++ b/drivers/usb/dwc2/core.h +@@ -1416,6 +1416,7 @@ void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg); + void dwc2_hsotg_disconnect(struct dwc2_hsotg *dwc2); + int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode); + #define dwc2_is_device_connected(hsotg) (hsotg->connected) ++#define dwc2_is_device_enabled(hsotg) (hsotg->enabled) + int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg); + int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup); + int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg); +@@ -1452,6 +1453,7 @@ static inline int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, + int testmode) + { return 0; } + #define dwc2_is_device_connected(hsotg) (0) ++#define dwc2_is_device_enabled(hsotg) (0) + static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg) + { return 0; } + static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, +diff --git a/drivers/usb/dwc2/drd.c b/drivers/usb/dwc2/drd.c +index aa6eb76f64ddc..36f2c38416e5e 100644 +--- a/drivers/usb/dwc2/drd.c ++++ b/drivers/usb/dwc2/drd.c +@@ -109,8 +109,10 @@ static int dwc2_drd_role_sw_set(struct usb_role_switch *sw, enum usb_role role) + already = dwc2_ovr_avalid(hsotg, true); + } else if (role == USB_ROLE_DEVICE) { + already = dwc2_ovr_bvalid(hsotg, true); +- /* This clear DCTL.SFTDISCON bit */ +- dwc2_hsotg_core_connect(hsotg); ++ if (dwc2_is_device_enabled(hsotg)) { ++ /* This clear DCTL.SFTDISCON bit */ ++ dwc2_hsotg_core_connect(hsotg); ++ } + } else { + if (dwc2_is_device_mode(hsotg)) { + if (!dwc2_ovr_bvalid(hsotg, false)) +diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c +index 7ff8fc8f79a9b..1ecedbb1684c8 
100644 +--- a/drivers/usb/dwc3/dwc3-pci.c ++++ b/drivers/usb/dwc3/dwc3-pci.c +@@ -85,8 +85,8 @@ static const struct acpi_gpio_mapping acpi_dwc3_byt_gpios[] = { + static struct gpiod_lookup_table platform_bytcr_gpios = { + .dev_id = "0000:00:16.0", + .table = { +- GPIO_LOOKUP("INT33FC:00", 54, "reset", GPIO_ACTIVE_HIGH), +- GPIO_LOOKUP("INT33FC:02", 14, "cs", GPIO_ACTIVE_HIGH), ++ GPIO_LOOKUP("INT33FC:00", 54, "cs", GPIO_ACTIVE_HIGH), ++ GPIO_LOOKUP("INT33FC:02", 14, "reset", GPIO_ACTIVE_HIGH), + {} + }, + }; +@@ -119,6 +119,13 @@ static const struct property_entry dwc3_pci_intel_properties[] = { + {} + }; + ++static const struct property_entry dwc3_pci_intel_byt_properties[] = { ++ PROPERTY_ENTRY_STRING("dr_mode", "peripheral"), ++ PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"), ++ PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"), ++ {} ++}; ++ + static const struct property_entry dwc3_pci_mrfld_properties[] = { + PROPERTY_ENTRY_STRING("dr_mode", "otg"), + PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"), +@@ -161,6 +168,10 @@ static const struct software_node dwc3_pci_intel_swnode = { + .properties = dwc3_pci_intel_properties, + }; + ++static const struct software_node dwc3_pci_intel_byt_swnode = { ++ .properties = dwc3_pci_intel_byt_properties, ++}; ++ + static const struct software_node dwc3_pci_intel_mrfld_swnode = { + .properties = dwc3_pci_mrfld_properties, + }; +@@ -344,7 +355,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = { + (kernel_ulong_t) &dwc3_pci_intel_swnode, }, + + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BYT), +- (kernel_ulong_t) &dwc3_pci_intel_swnode, }, ++ (kernel_ulong_t) &dwc3_pci_intel_byt_swnode, }, + + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD), + (kernel_ulong_t) &dwc3_pci_intel_mrfld_swnode, }, +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c +index 7aab9116b0256..0566a841dca25 100644 +--- a/drivers/usb/dwc3/gadget.c ++++ b/drivers/usb/dwc3/gadget.c +@@ -4131,9 +4131,11 @@ static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt) + unsigned long flags; + irqreturn_t ret = IRQ_NONE; + ++ local_bh_disable(); + spin_lock_irqsave(&dwc->lock, flags); + ret = dwc3_process_event_buf(evt); + spin_unlock_irqrestore(&dwc->lock, flags); ++ local_bh_enable(); + + return ret; + } +diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c +index d9ed651f06ac3..0f14c5291af07 100644 +--- a/drivers/usb/gadget/function/rndis.c ++++ b/drivers/usb/gadget/function/rndis.c +@@ -922,6 +922,7 @@ struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v) + params->resp_avail = resp_avail; + params->v = v; + INIT_LIST_HEAD(¶ms->resp_queue); ++ spin_lock_init(¶ms->resp_lock); + pr_debug("%s: configNr = %d\n", __func__, i); + + return params; +@@ -1015,12 +1016,14 @@ void rndis_free_response(struct rndis_params *params, u8 *buf) + { + rndis_resp_t *r, *n; + ++ spin_lock(¶ms->resp_lock); + list_for_each_entry_safe(r, n, ¶ms->resp_queue, list) { + if (r->buf == buf) { + list_del(&r->list); + kfree(r); + } + } ++ spin_unlock(¶ms->resp_lock); + } + EXPORT_SYMBOL_GPL(rndis_free_response); + +@@ -1030,14 +1033,17 @@ u8 *rndis_get_next_response(struct rndis_params *params, u32 *length) + + if (!length) return NULL; + ++ spin_lock(¶ms->resp_lock); + list_for_each_entry_safe(r, n, ¶ms->resp_queue, list) { + if (!r->send) { + r->send = 1; + *length = r->length; ++ spin_unlock(¶ms->resp_lock); + return r->buf; + } + } + ++ spin_unlock(¶ms->resp_lock); + return NULL; + } + 
EXPORT_SYMBOL_GPL(rndis_get_next_response); +@@ -1054,7 +1060,9 @@ static rndis_resp_t *rndis_add_response(struct rndis_params *params, u32 length) + r->length = length; + r->send = 0; + ++ spin_lock(¶ms->resp_lock); + list_add_tail(&r->list, ¶ms->resp_queue); ++ spin_unlock(¶ms->resp_lock); + return r; + } + +diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h +index f6167f7fea82b..6206b8b7490f6 100644 +--- a/drivers/usb/gadget/function/rndis.h ++++ b/drivers/usb/gadget/function/rndis.h +@@ -174,6 +174,7 @@ typedef struct rndis_params { + void (*resp_avail)(void *v); + void *v; + struct list_head resp_queue; ++ spinlock_t resp_lock; + } rndis_params; + + /* RNDIS Message parser and other useless functions */ +diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c +index 857159dd5ae05..540824534e962 100644 +--- a/drivers/usb/gadget/udc/udc-xilinx.c ++++ b/drivers/usb/gadget/udc/udc-xilinx.c +@@ -1615,6 +1615,8 @@ static void xudc_getstatus(struct xusb_udc *udc) + break; + case USB_RECIP_ENDPOINT: + epnum = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK; ++ if (epnum >= XUSB_MAX_ENDPOINTS) ++ goto stall; + target_ep = &udc->ep[epnum]; + epcfgreg = udc->read_fn(udc->addr + target_ep->offset); + halt = epcfgreg & XUSB_EP_CFG_STALL_MASK; +@@ -1682,6 +1684,10 @@ static void xudc_set_clear_feature(struct xusb_udc *udc) + case USB_RECIP_ENDPOINT: + if (!udc->setup.wValue) { + endpoint = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK; ++ if (endpoint >= XUSB_MAX_ENDPOINTS) { ++ xudc_ep0_stall(udc); ++ return; ++ } + target_ep = &udc->ep[endpoint]; + outinbit = udc->setup.wIndex & USB_ENDPOINT_DIR_MASK; + outinbit = outinbit >> 7; +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index f5b1bcc875ded..d7c0bf494d930 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -1091,6 +1091,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) + int retval = 0; + bool comp_timer_running = false; + bool pending_portevent = false; ++ bool reinit_xhc = false; + + if (!hcd->state) + return 0; +@@ -1107,10 +1108,11 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) + set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); + + spin_lock_irq(&xhci->lock); +- if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend) +- hibernated = true; + +- if (!hibernated) { ++ if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend) ++ reinit_xhc = true; ++ ++ if (!reinit_xhc) { + /* + * Some controllers might lose power during suspend, so wait + * for controller not ready bit to clear, just as in xHC init. 
+@@ -1143,12 +1145,17 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) + spin_unlock_irq(&xhci->lock); + return -ETIMEDOUT; + } +- temp = readl(&xhci->op_regs->status); + } + +- /* If restore operation fails, re-initialize the HC during resume */ +- if ((temp & STS_SRE) || hibernated) { ++ temp = readl(&xhci->op_regs->status); + ++ /* re-initialize the HC on Restore Error, or Host Controller Error */ ++ if (temp & (STS_SRE | STS_HCE)) { ++ reinit_xhc = true; ++ xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp); ++ } ++ ++ if (reinit_xhc) { + if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && + !(xhci_all_ports_seen_u0(xhci))) { + del_timer_sync(&xhci->comp_mode_recovery_timer); +@@ -1604,9 +1611,12 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag + struct urb_priv *urb_priv; + int num_tds; + +- if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, +- true, true, __func__) <= 0) ++ if (!urb) + return -EINVAL; ++ ret = xhci_check_args(hcd, urb->dev, urb->ep, ++ true, true, __func__); ++ if (ret <= 0) ++ return ret ? ret : -EINVAL; + + slot_id = urb->dev->slot_id; + ep_index = xhci_get_endpoint_index(&urb->ep->desc); +@@ -3323,7 +3333,7 @@ static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, + return -EINVAL; + ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__); + if (ret <= 0) +- return -EINVAL; ++ return ret ? ret : -EINVAL; + if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) { + xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion" + " descriptor for ep 0x%x does not support streams\n", +diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c +index 58cba8ee0277a..2798fca712612 100644 +--- a/drivers/usb/serial/ch341.c ++++ b/drivers/usb/serial/ch341.c +@@ -81,7 +81,6 @@ + #define CH341_QUIRK_SIMULATE_BREAK BIT(1) + + static const struct usb_device_id id_table[] = { +- { USB_DEVICE(0x1a86, 0x5512) }, + { USB_DEVICE(0x1a86, 0x5523) }, + { USB_DEVICE(0x1a86, 0x7522) }, + { USB_DEVICE(0x1a86, 0x7523) }, +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 962e9943fc20e..e7755d9cfc61a 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -198,6 +198,8 @@ static void option_instat_callback(struct urb *urb); + + #define DELL_PRODUCT_5821E 0x81d7 + #define DELL_PRODUCT_5821E_ESIM 0x81e0 ++#define DELL_PRODUCT_5829E_ESIM 0x81e4 ++#define DELL_PRODUCT_5829E 0x81e6 + + #define KYOCERA_VENDOR_ID 0x0c88 + #define KYOCERA_PRODUCT_KPC650 0x17da +@@ -1063,6 +1065,10 @@ static const struct usb_device_id option_ids[] = { + .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, + { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM), + .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, ++ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E), ++ .driver_info = RSVD(0) | RSVD(6) }, ++ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E_ESIM), ++ .driver_info = RSVD(0) | RSVD(6) }, + { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ + { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, + { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, +@@ -1273,10 +1279,16 @@ static const struct usb_device_id option_ids[] = { + .driver_info = NCTRL(2) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff), /* Telit LE910-S1 (ECM) */ + .driver_info = NCTRL(2) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701a, 0xff), /* Telit LE910R1 (RNDIS) */ ++ .driver_info = NCTRL(2) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701b, 
0xff), /* Telit LE910R1 (ECM) */ ++ .driver_info = NCTRL(2) }, + { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */ + .driver_info = NCTRL(0) | ZLP }, + { USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */ + .driver_info = NCTRL(0) | ZLP }, ++ { USB_DEVICE(TELIT_VENDOR_ID, 0x9201), /* Telit LE910R1 flashing device */ ++ .driver_info = NCTRL(0) | ZLP }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), + .driver_info = RSVD(1) }, +diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c +index 6d27a5b5e3cac..7ffcda94d323a 100644 +--- a/drivers/usb/typec/tipd/core.c ++++ b/drivers/usb/typec/tipd/core.c +@@ -761,12 +761,12 @@ static int tps6598x_probe(struct i2c_client *client) + + ret = tps6598x_read32(tps, TPS_REG_STATUS, &status); + if (ret < 0) +- return ret; ++ goto err_clear_mask; + trace_tps6598x_status(status); + + ret = tps6598x_read32(tps, TPS_REG_SYSTEM_CONF, &conf); + if (ret < 0) +- return ret; ++ goto err_clear_mask; + + /* + * This fwnode has a "compatible" property, but is never populated as a +@@ -855,7 +855,8 @@ err_role_put: + usb_role_switch_put(tps->role_sw); + err_fwnode_put: + fwnode_handle_put(fwnode); +- ++err_clear_mask: ++ tps6598x_write64(tps, TPS_REG_INT_MASK1, 0); + return ret; + } + +diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c +index d6ca1c7ad513f..37f0b4274113c 100644 +--- a/drivers/vhost/vsock.c ++++ b/drivers/vhost/vsock.c +@@ -629,16 +629,18 @@ err: + return ret; + } + +-static int vhost_vsock_stop(struct vhost_vsock *vsock) ++static int vhost_vsock_stop(struct vhost_vsock *vsock, bool check_owner) + { + size_t i; +- int ret; ++ int ret = 0; + + mutex_lock(&vsock->dev.mutex); + +- ret = vhost_dev_check_owner(&vsock->dev); +- if (ret) +- goto err; ++ if (check_owner) { ++ ret = vhost_dev_check_owner(&vsock->dev); ++ if (ret) ++ goto err; ++ } + + for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { + struct vhost_virtqueue *vq = &vsock->vqs[i]; +@@ -753,7 +755,12 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file) + * inefficient. Room for improvement here. */ + vsock_for_each_connected_socket(vhost_vsock_reset_orphans); + +- vhost_vsock_stop(vsock); ++ /* Don't check the owner, because we are in the release path, so we ++ * need to stop the vsock device in any case. ++ * vhost_vsock_stop() can not fail in this case, so we don't need to ++ * check the return code. 
++ */ ++ vhost_vsock_stop(vsock, false); + vhost_vsock_flush(vsock); + vhost_dev_stop(&vsock->dev); + +@@ -868,7 +875,7 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl, + if (start) + return vhost_vsock_start(vsock); + else +- return vhost_vsock_stop(vsock); ++ return vhost_vsock_stop(vsock, true); + case VHOST_GET_FEATURES: + features = VHOST_VSOCK_FEATURES; + if (copy_to_user(argp, &features, sizeof(features))) +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h +index 5fe5eccb3c874..269094176b8b3 100644 +--- a/fs/btrfs/ctree.h ++++ b/fs/btrfs/ctree.h +@@ -3315,7 +3315,7 @@ void btrfs_exclop_finish(struct btrfs_fs_info *fs_info); + int __init btrfs_auto_defrag_init(void); + void __cold btrfs_auto_defrag_exit(void); + int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, +- struct btrfs_inode *inode); ++ struct btrfs_inode *inode, u32 extent_thresh); + int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info); + void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info); + int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync); +diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c +index 11204dbbe0530..a0179cc62913b 100644 +--- a/fs/btrfs/file.c ++++ b/fs/btrfs/file.c +@@ -50,11 +50,14 @@ struct inode_defrag { + /* root objectid */ + u64 root; + +- /* last offset we were able to defrag */ +- u64 last_offset; +- +- /* if we've wrapped around back to zero once already */ +- int cycled; ++ /* ++ * The extent size threshold for autodefrag. ++ * ++ * This value is different for compressed/non-compressed extents, ++ * thus needs to be passed from higher layer. ++ * (aka, inode_should_defrag()) ++ */ ++ u32 extent_thresh; + }; + + static int __compare_inode_defrag(struct inode_defrag *defrag1, +@@ -107,8 +110,8 @@ static int __btrfs_add_inode_defrag(struct btrfs_inode *inode, + */ + if (defrag->transid < entry->transid) + entry->transid = defrag->transid; +- if (defrag->last_offset > entry->last_offset) +- entry->last_offset = defrag->last_offset; ++ entry->extent_thresh = min(defrag->extent_thresh, ++ entry->extent_thresh); + return -EEXIST; + } + } +@@ -134,7 +137,7 @@ static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info) + * enabled + */ + int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, +- struct btrfs_inode *inode) ++ struct btrfs_inode *inode, u32 extent_thresh) + { + struct btrfs_root *root = inode->root; + struct btrfs_fs_info *fs_info = root->fs_info; +@@ -160,6 +163,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, + defrag->ino = btrfs_ino(inode); + defrag->transid = transid; + defrag->root = root->root_key.objectid; ++ defrag->extent_thresh = extent_thresh; + + spin_lock(&fs_info->defrag_inodes_lock); + if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) { +@@ -178,34 +182,6 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, + return 0; + } + +-/* +- * Requeue the defrag object. If there is a defrag object that points to +- * the same inode in the tree, we will merge them together (by +- * __btrfs_add_inode_defrag()) and free the one that we want to requeue. +- */ +-static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode, +- struct inode_defrag *defrag) +-{ +- struct btrfs_fs_info *fs_info = inode->root->fs_info; +- int ret; +- +- if (!__need_auto_defrag(fs_info)) +- goto out; +- +- /* +- * Here we don't check the IN_DEFRAG flag, because we need merge +- * them together. 
+- */ +- spin_lock(&fs_info->defrag_inodes_lock); +- ret = __btrfs_add_inode_defrag(inode, defrag); +- spin_unlock(&fs_info->defrag_inodes_lock); +- if (ret) +- goto out; +- return; +-out: +- kmem_cache_free(btrfs_inode_defrag_cachep, defrag); +-} +- + /* + * pick the defragable inode that we want, if it doesn't exist, we will get + * the next one. +@@ -278,8 +254,14 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info, + struct btrfs_root *inode_root; + struct inode *inode; + struct btrfs_ioctl_defrag_range_args range; +- int num_defrag; +- int ret; ++ int ret = 0; ++ u64 cur = 0; ++ ++again: ++ if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state)) ++ goto cleanup; ++ if (!__need_auto_defrag(fs_info)) ++ goto cleanup; + + /* get the inode */ + inode_root = btrfs_get_fs_root(fs_info, defrag->root, true); +@@ -295,39 +277,30 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info, + goto cleanup; + } + ++ if (cur >= i_size_read(inode)) { ++ iput(inode); ++ goto cleanup; ++ } ++ + /* do a chunk of defrag */ + clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags); + memset(&range, 0, sizeof(range)); + range.len = (u64)-1; +- range.start = defrag->last_offset; ++ range.start = cur; ++ range.extent_thresh = defrag->extent_thresh; + + sb_start_write(fs_info->sb); +- num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid, ++ ret = btrfs_defrag_file(inode, NULL, &range, defrag->transid, + BTRFS_DEFRAG_BATCH); + sb_end_write(fs_info->sb); +- /* +- * if we filled the whole defrag batch, there +- * must be more work to do. Queue this defrag +- * again +- */ +- if (num_defrag == BTRFS_DEFRAG_BATCH) { +- defrag->last_offset = range.start; +- btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag); +- } else if (defrag->last_offset && !defrag->cycled) { +- /* +- * we didn't fill our defrag batch, but +- * we didn't start at zero. Make sure we loop +- * around to the start of the file. 
+- */ +- defrag->last_offset = 0; +- defrag->cycled = 1; +- btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag); +- } else { +- kmem_cache_free(btrfs_inode_defrag_cachep, defrag); +- } +- + iput(inode); +- return 0; ++ ++ if (ret < 0) ++ goto cleanup; ++ ++ cur = max(cur + fs_info->sectorsize, range.start); ++ goto again; ++ + cleanup: + kmem_cache_free(btrfs_inode_defrag_cachep, defrag); + return ret; +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index 39a6745434613..3be5735372ee8 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -561,12 +561,12 @@ static inline int inode_need_compress(struct btrfs_inode *inode, u64 start, + } + + static inline void inode_should_defrag(struct btrfs_inode *inode, +- u64 start, u64 end, u64 num_bytes, u64 small_write) ++ u64 start, u64 end, u64 num_bytes, u32 small_write) + { + /* If this is a small write inside eof, kick off a defrag */ + if (num_bytes < small_write && + (start > 0 || end + 1 < inode->disk_i_size)) +- btrfs_add_inode_defrag(NULL, inode); ++ btrfs_add_inode_defrag(NULL, inode, small_write); + } + + /* +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c +index cec7163bc8730..541a4fbfd79ec 100644 +--- a/fs/btrfs/ioctl.c ++++ b/fs/btrfs/ioctl.c +@@ -1020,23 +1020,37 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start, + return em; + } + ++static u32 get_extent_max_capacity(const struct extent_map *em) ++{ ++ if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) ++ return BTRFS_MAX_COMPRESSED; ++ return BTRFS_MAX_EXTENT_SIZE; ++} ++ + static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em, + bool locked) + { + struct extent_map *next; +- bool ret = true; ++ bool ret = false; + + /* this is the last extent */ + if (em->start + em->len >= i_size_read(inode)) + return false; + + next = defrag_lookup_extent(inode, em->start + em->len, locked); ++ /* No more em or hole */ + if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE) +- ret = false; +- else if ((em->block_start + em->block_len == next->block_start) && +- (em->block_len > SZ_128K && next->block_len > SZ_128K)) +- ret = false; +- ++ goto out; ++ if (test_bit(EXTENT_FLAG_PREALLOC, &next->flags)) ++ goto out; ++ /* ++ * If the next extent is at its max capacity, defragging current extent ++ * makes no sense, as the total number of extents won't change. ++ */ ++ if (next->len >= get_extent_max_capacity(em)) ++ goto out; ++ ret = true; ++out: + free_extent_map(next); + return ret; + } +@@ -1160,8 +1174,10 @@ struct defrag_target_range { + static int defrag_collect_targets(struct btrfs_inode *inode, + u64 start, u64 len, u32 extent_thresh, + u64 newer_than, bool do_compress, +- bool locked, struct list_head *target_list) ++ bool locked, struct list_head *target_list, ++ u64 *last_scanned_ret) + { ++ bool last_is_target = false; + u64 cur = start; + int ret = 0; + +@@ -1171,6 +1187,7 @@ static int defrag_collect_targets(struct btrfs_inode *inode, + bool next_mergeable = true; + u64 range_len; + ++ last_is_target = false; + em = defrag_lookup_extent(&inode->vfs_inode, cur, locked); + if (!em) + break; +@@ -1228,6 +1245,13 @@ static int defrag_collect_targets(struct btrfs_inode *inode, + if (range_len >= extent_thresh) + goto next; + ++ /* ++ * Skip extents already at its max capacity, this is mostly for ++ * compressed extents, which max cap is only 128K. 
++ */ ++ if (em->len >= get_extent_max_capacity(em)) ++ goto next; ++ + next_mergeable = defrag_check_next_extent(&inode->vfs_inode, em, + locked); + if (!next_mergeable) { +@@ -1246,6 +1270,7 @@ static int defrag_collect_targets(struct btrfs_inode *inode, + } + + add: ++ last_is_target = true; + range_len = min(extent_map_end(em), start + len) - cur; + /* + * This one is a good target, check if it can be merged into +@@ -1289,6 +1314,17 @@ next: + kfree(entry); + } + } ++ if (!ret && last_scanned_ret) { ++ /* ++ * If the last extent is not a target, the caller can skip to ++ * the end of that extent. ++ * Otherwise, we can only go the end of the specified range. ++ */ ++ if (!last_is_target) ++ *last_scanned_ret = max(cur, *last_scanned_ret); ++ else ++ *last_scanned_ret = max(start + len, *last_scanned_ret); ++ } + return ret; + } + +@@ -1347,7 +1383,8 @@ static int defrag_one_locked_target(struct btrfs_inode *inode, + } + + static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len, +- u32 extent_thresh, u64 newer_than, bool do_compress) ++ u32 extent_thresh, u64 newer_than, bool do_compress, ++ u64 *last_scanned_ret) + { + struct extent_state *cached_state = NULL; + struct defrag_target_range *entry; +@@ -1393,7 +1430,7 @@ static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len, + */ + ret = defrag_collect_targets(inode, start, len, extent_thresh, + newer_than, do_compress, true, +- &target_list); ++ &target_list, last_scanned_ret); + if (ret < 0) + goto unlock_extent; + +@@ -1428,7 +1465,8 @@ static int defrag_one_cluster(struct btrfs_inode *inode, + u64 start, u32 len, u32 extent_thresh, + u64 newer_than, bool do_compress, + unsigned long *sectors_defragged, +- unsigned long max_sectors) ++ unsigned long max_sectors, ++ u64 *last_scanned_ret) + { + const u32 sectorsize = inode->root->fs_info->sectorsize; + struct defrag_target_range *entry; +@@ -1439,7 +1477,7 @@ static int defrag_one_cluster(struct btrfs_inode *inode, + BUILD_BUG_ON(!IS_ALIGNED(CLUSTER_SIZE, PAGE_SIZE)); + ret = defrag_collect_targets(inode, start, len, extent_thresh, + newer_than, do_compress, false, +- &target_list); ++ &target_list, NULL); + if (ret < 0) + goto out; + +@@ -1456,6 +1494,15 @@ static int defrag_one_cluster(struct btrfs_inode *inode, + range_len = min_t(u32, range_len, + (max_sectors - *sectors_defragged) * sectorsize); + ++ /* ++ * If defrag_one_range() has updated last_scanned_ret, ++ * our range may already be invalid (e.g. hole punched). ++ * Skip if our range is before last_scanned_ret, as there is ++ * no need to defrag the range anymore. ++ */ ++ if (entry->start + range_len <= *last_scanned_ret) ++ continue; ++ + if (ra) + page_cache_sync_readahead(inode->vfs_inode.i_mapping, + ra, NULL, entry->start >> PAGE_SHIFT, +@@ -1468,7 +1515,8 @@ static int defrag_one_cluster(struct btrfs_inode *inode, + * accounting. 
+ */ + ret = defrag_one_range(inode, entry->start, range_len, +- extent_thresh, newer_than, do_compress); ++ extent_thresh, newer_than, do_compress, ++ last_scanned_ret); + if (ret < 0) + break; + *sectors_defragged += range_len >> +@@ -1479,6 +1527,8 @@ out: + list_del_init(&entry->list); + kfree(entry); + } ++ if (ret >= 0) ++ *last_scanned_ret = max(*last_scanned_ret, start + len); + return ret; + } + +@@ -1564,6 +1614,7 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra, + + while (cur < last_byte) { + const unsigned long prev_sectors_defragged = sectors_defragged; ++ u64 last_scanned = cur; + u64 cluster_end; + + /* The cluster size 256K should always be page aligned */ +@@ -1593,8 +1644,8 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra, + BTRFS_I(inode)->defrag_compress = compress_type; + ret = defrag_one_cluster(BTRFS_I(inode), ra, cur, + cluster_end + 1 - cur, extent_thresh, +- newer_than, do_compress, +- §ors_defragged, max_to_defrag); ++ newer_than, do_compress, §ors_defragged, ++ max_to_defrag, &last_scanned); + + if (sectors_defragged > prev_sectors_defragged) + balance_dirty_pages_ratelimited(inode->i_mapping); +@@ -1602,7 +1653,7 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra, + btrfs_inode_unlock(inode, 0); + if (ret < 0) + break; +- cur = cluster_end + 1; ++ cur = max(cluster_end + 1, last_scanned); + if (ret > 0) { + ret = 0; + break; +diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c +index 0fb90cbe76697..e6e28a9c79877 100644 +--- a/fs/btrfs/lzo.c ++++ b/fs/btrfs/lzo.c +@@ -380,6 +380,17 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb) + kunmap(cur_page); + cur_in += LZO_LEN; + ++ if (seg_len > lzo1x_worst_compress(PAGE_SIZE)) { ++ /* ++ * seg_len shouldn't be larger than we have allocated ++ * for workspace->cbuf ++ */ ++ btrfs_err(fs_info, "unexpectedly large lzo segment len %u", ++ seg_len); ++ ret = -EIO; ++ goto out; ++ } ++ + /* Copy the compressed segment payload into workspace */ + copy_compressed_segment(cb, workspace->cbuf, seg_len, &cur_in); + +diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c +index 7733e8ac0a698..51382d2be3d44 100644 +--- a/fs/btrfs/tree-checker.c ++++ b/fs/btrfs/tree-checker.c +@@ -965,6 +965,7 @@ static int check_dev_item(struct extent_buffer *leaf, + struct btrfs_key *key, int slot) + { + struct btrfs_dev_item *ditem; ++ const u32 item_size = btrfs_item_size_nr(leaf, slot); + + if (unlikely(key->objectid != BTRFS_DEV_ITEMS_OBJECTID)) { + dev_item_err(leaf, slot, +@@ -972,6 +973,13 @@ static int check_dev_item(struct extent_buffer *leaf, + key->objectid, BTRFS_DEV_ITEMS_OBJECTID); + return -EUCLEAN; + } ++ ++ if (unlikely(item_size != sizeof(*ditem))) { ++ dev_item_err(leaf, slot, "invalid item size: has %u expect %zu", ++ item_size, sizeof(*ditem)); ++ return -EUCLEAN; ++ } ++ + ditem = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item); + if (unlikely(btrfs_device_id(leaf, ditem) != key->offset)) { + dev_item_err(leaf, slot, +@@ -1007,6 +1015,7 @@ static int check_inode_item(struct extent_buffer *leaf, + struct btrfs_inode_item *iitem; + u64 super_gen = btrfs_super_generation(fs_info->super_copy); + u32 valid_mask = (S_IFMT | S_ISUID | S_ISGID | S_ISVTX | 0777); ++ const u32 item_size = btrfs_item_size_nr(leaf, slot); + u32 mode; + int ret; + u32 flags; +@@ -1016,6 +1025,12 @@ static int check_inode_item(struct extent_buffer *leaf, + if (unlikely(ret < 0)) + return ret; + ++ if (unlikely(item_size != sizeof(*iitem))) { ++ generic_err(leaf, 
slot, "invalid item size: has %u expect %zu", ++ item_size, sizeof(*iitem)); ++ return -EUCLEAN; ++ } ++ + iitem = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item); + + /* Here we use super block generation + 1 to handle log tree */ +diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c +index d3cd2a94d1e8c..d1f9d26322027 100644 +--- a/fs/configfs/dir.c ++++ b/fs/configfs/dir.c +@@ -34,6 +34,14 @@ + */ + DEFINE_SPINLOCK(configfs_dirent_lock); + ++/* ++ * All of link_obj/unlink_obj/link_group/unlink_group require that ++ * subsys->su_mutex is held. ++ * But parent configfs_subsystem is NULL when config_item is root. ++ * Use this mutex when config_item is root. ++ */ ++static DEFINE_MUTEX(configfs_subsystem_mutex); ++ + static void configfs_d_iput(struct dentry * dentry, + struct inode * inode) + { +@@ -1859,7 +1867,9 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys) + group->cg_item.ci_name = group->cg_item.ci_namebuf; + + sd = root->d_fsdata; ++ mutex_lock(&configfs_subsystem_mutex); + link_group(to_config_group(sd->s_element), group); ++ mutex_unlock(&configfs_subsystem_mutex); + + inode_lock_nested(d_inode(root), I_MUTEX_PARENT); + +@@ -1884,7 +1894,9 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys) + inode_unlock(d_inode(root)); + + if (err) { ++ mutex_lock(&configfs_subsystem_mutex); + unlink_group(group); ++ mutex_unlock(&configfs_subsystem_mutex); + configfs_release_fs(); + } + put_fragment(frag); +@@ -1931,7 +1943,9 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys) + + dput(dentry); + ++ mutex_lock(&configfs_subsystem_mutex); + unlink_group(group); ++ mutex_unlock(&configfs_subsystem_mutex); + configfs_release_fs(); + } + +diff --git a/fs/io_uring.c b/fs/io_uring.c +index a92f276f21d9c..db724482cd117 100644 +--- a/fs/io_uring.c ++++ b/fs/io_uring.c +@@ -4477,6 +4477,7 @@ static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head) + } else { + list_add_tail(&buf->list, &(*head)->list); + } ++ cond_resched(); + } + + return i ? i : -ENOMEM; +@@ -7633,7 +7634,7 @@ static int io_run_task_work_sig(void) + /* when returns >0, the caller should retry */ + static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx, + struct io_wait_queue *iowq, +- signed long *timeout) ++ ktime_t timeout) + { + int ret; + +@@ -7645,8 +7646,9 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx, + if (test_bit(0, &ctx->check_cq_overflow)) + return 1; + +- *timeout = schedule_timeout(*timeout); +- return !*timeout ? 
-ETIME : 1; ++ if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS)) ++ return -ETIME; ++ return 1; + } + + /* +@@ -7659,7 +7661,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, + { + struct io_wait_queue iowq; + struct io_rings *rings = ctx->rings; +- signed long timeout = MAX_SCHEDULE_TIMEOUT; ++ ktime_t timeout = KTIME_MAX; + int ret; + + do { +@@ -7675,7 +7677,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, + + if (get_timespec64(&ts, uts)) + return -EFAULT; +- timeout = timespec64_to_jiffies(&ts); ++ timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns()); + } + + if (sig) { +@@ -7707,7 +7709,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, + } + prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq, + TASK_INTERRUPTIBLE); +- ret = io_cqring_wait_schedule(ctx, &iowq, &timeout); ++ ret = io_cqring_wait_schedule(ctx, &iowq, timeout); + finish_wait(&ctx->cq_wait, &iowq.wq); + cond_resched(); + } while (ret > 0); +@@ -7864,7 +7866,15 @@ static __cold int io_rsrc_ref_quiesce(struct io_rsrc_data *data, + ret = wait_for_completion_interruptible(&data->done); + if (!ret) { + mutex_lock(&ctx->uring_lock); +- break; ++ if (atomic_read(&data->refs) > 0) { ++ /* ++ * it has been revived by another thread while ++ * we were unlocked ++ */ ++ mutex_unlock(&ctx->uring_lock); ++ } else { ++ break; ++ } + } + + atomic_inc(&data->refs); +diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c +index 3616839c5c4b6..f2625a372a3ae 100644 +--- a/fs/tracefs/inode.c ++++ b/fs/tracefs/inode.c +@@ -264,7 +264,6 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts) + if (!gid_valid(gid)) + return -EINVAL; + opts->gid = gid; +- set_gid(tracefs_mount->mnt_root, gid); + break; + case Opt_mode: + if (match_octal(&args[0], &option)) +@@ -291,7 +290,9 @@ static int tracefs_apply_options(struct super_block *sb) + inode->i_mode |= opts->mode; + + inode->i_uid = opts->uid; +- inode->i_gid = opts->gid; ++ ++ /* Set all the group ids to the mount option */ ++ set_gid(sb->s_root, opts->gid); + + return 0; + } +diff --git a/include/linux/bpf.h b/include/linux/bpf.h +index 29b9b199c56bb..7078938ba235c 100644 +--- a/include/linux/bpf.h ++++ b/include/linux/bpf.h +@@ -209,11 +209,9 @@ static inline bool map_value_has_timer(const struct bpf_map *map) + static inline void check_and_init_map_value(struct bpf_map *map, void *dst) + { + if (unlikely(map_value_has_spin_lock(map))) +- *(struct bpf_spin_lock *)(dst + map->spin_lock_off) = +- (struct bpf_spin_lock){}; ++ memset(dst + map->spin_lock_off, 0, sizeof(struct bpf_spin_lock)); + if (unlikely(map_value_has_timer(map))) +- *(struct bpf_timer *)(dst + map->timer_off) = +- (struct bpf_timer){}; ++ memset(dst + map->timer_off, 0, sizeof(struct bpf_timer)); + } + + /* copy everything but bpf_spin_lock and bpf_timer. There could be one of each. 
*/ +@@ -224,7 +222,8 @@ static inline void copy_map_value(struct bpf_map *map, void *dst, void *src) + if (unlikely(map_value_has_spin_lock(map))) { + s_off = map->spin_lock_off; + s_sz = sizeof(struct bpf_spin_lock); +- } else if (unlikely(map_value_has_timer(map))) { ++ } ++ if (unlikely(map_value_has_timer(map))) { + t_off = map->timer_off; + t_sz = sizeof(struct bpf_timer); + } +diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h +index 98efb7b5660d9..c9a3ac9efeaa9 100644 +--- a/include/linux/nvmem-provider.h ++++ b/include/linux/nvmem-provider.h +@@ -70,7 +70,8 @@ struct nvmem_keepout { + * @word_size: Minimum read/write access granularity. + * @stride: Minimum read/write access stride. + * @priv: User context passed to read/write callbacks. +- * @wp-gpio: Write protect pin ++ * @wp-gpio: Write protect pin ++ * @ignore_wp: Write Protect pin is managed by the provider. + * + * Note: A default "nvmem" name will be assigned to the device if + * no name is specified in its configuration. In such case "" is +@@ -92,6 +93,7 @@ struct nvmem_config { + enum nvmem_type type; + bool read_only; + bool root_only; ++ bool ignore_wp; + struct device_node *of_node; + bool no_of_node; + nvmem_reg_read_t reg_read; +diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h +index 584d94be9c8b0..18a717fe62eb0 100644 +--- a/include/linux/skmsg.h ++++ b/include/linux/skmsg.h +@@ -507,12 +507,6 @@ static inline bool sk_psock_strp_enabled(struct sk_psock *psock) + return !!psock->saved_data_ready; + } + +-static inline bool sk_is_tcp(const struct sock *sk) +-{ +- return sk->sk_type == SOCK_STREAM && +- sk->sk_protocol == IPPROTO_TCP; +-} +- + static inline bool sk_is_udp(const struct sock *sk) + { + return sk->sk_type == SOCK_DGRAM && +diff --git a/include/linux/slab.h b/include/linux/slab.h +index 181045148b065..79c2ff9256d04 100644 +--- a/include/linux/slab.h ++++ b/include/linux/slab.h +@@ -669,8 +669,7 @@ static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flag + * allocator where we care about the real place the memory allocation + * request comes from. 
+ */ +-extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller) +- __alloc_size(1); ++extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller); + #define kmalloc_track_caller(size, flags) \ + __kmalloc_track_caller(size, flags, _RET_IP_) + +diff --git a/include/net/checksum.h b/include/net/checksum.h +index 5b96d5bd6e545..d3b5d368a0caa 100644 +--- a/include/net/checksum.h ++++ b/include/net/checksum.h +@@ -22,7 +22,7 @@ + #include + + #ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER +-static inline ++static __always_inline + __wsum csum_and_copy_from_user (const void __user *src, void *dst, + int len) + { +@@ -33,7 +33,7 @@ __wsum csum_and_copy_from_user (const void __user *src, void *dst, + #endif + + #ifndef HAVE_CSUM_COPY_USER +-static __inline__ __wsum csum_and_copy_to_user ++static __always_inline __wsum csum_and_copy_to_user + (const void *src, void __user *dst, int len) + { + __wsum sum = csum_partial(src, len, ~0U); +@@ -45,7 +45,7 @@ static __inline__ __wsum csum_and_copy_to_user + #endif + + #ifndef _HAVE_ARCH_CSUM_AND_COPY +-static inline __wsum ++static __always_inline __wsum + csum_partial_copy_nocheck(const void *src, void *dst, int len) + { + memcpy(dst, src, len); +@@ -54,7 +54,7 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len) + #endif + + #ifndef HAVE_ARCH_CSUM_ADD +-static inline __wsum csum_add(__wsum csum, __wsum addend) ++static __always_inline __wsum csum_add(__wsum csum, __wsum addend) + { + u32 res = (__force u32)csum; + res += (__force u32)addend; +@@ -62,12 +62,12 @@ static inline __wsum csum_add(__wsum csum, __wsum addend) + } + #endif + +-static inline __wsum csum_sub(__wsum csum, __wsum addend) ++static __always_inline __wsum csum_sub(__wsum csum, __wsum addend) + { + return csum_add(csum, ~addend); + } + +-static inline __sum16 csum16_add(__sum16 csum, __be16 addend) ++static __always_inline __sum16 csum16_add(__sum16 csum, __be16 addend) + { + u16 res = (__force u16)csum; + +@@ -75,12 +75,12 @@ static inline __sum16 csum16_add(__sum16 csum, __be16 addend) + return (__force __sum16)(res + (res < (__force u16)addend)); + } + +-static inline __sum16 csum16_sub(__sum16 csum, __be16 addend) ++static __always_inline __sum16 csum16_sub(__sum16 csum, __be16 addend) + { + return csum16_add(csum, ~addend); + } + +-static inline __wsum csum_shift(__wsum sum, int offset) ++static __always_inline __wsum csum_shift(__wsum sum, int offset) + { + /* rotate sum to align it with a 16b boundary */ + if (offset & 1) +@@ -88,42 +88,43 @@ static inline __wsum csum_shift(__wsum sum, int offset) + return sum; + } + +-static inline __wsum ++static __always_inline __wsum + csum_block_add(__wsum csum, __wsum csum2, int offset) + { + return csum_add(csum, csum_shift(csum2, offset)); + } + +-static inline __wsum ++static __always_inline __wsum + csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len) + { + return csum_block_add(csum, csum2, offset); + } + +-static inline __wsum ++static __always_inline __wsum + csum_block_sub(__wsum csum, __wsum csum2, int offset) + { + return csum_block_add(csum, ~csum2, offset); + } + +-static inline __wsum csum_unfold(__sum16 n) ++static __always_inline __wsum csum_unfold(__sum16 n) + { + return (__force __wsum)n; + } + +-static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum) ++static __always_inline ++__wsum csum_partial_ext(const void *buff, int len, __wsum sum) + { + return csum_partial(buff, len, sum); + } + + #define CSUM_MANGLED_0 ((__force 
__sum16)0xffff) + +-static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) ++static __always_inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) + { + *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum))); + } + +-static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to) ++static __always_inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to) + { + __wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from); + +@@ -136,11 +137,16 @@ static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to) + * m : old value of a 16bit field + * m' : new value of a 16bit field + */ +-static inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new) ++static __always_inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new) + { + *sum = ~csum16_add(csum16_sub(~(*sum), old), new); + } + ++static inline void csum_replace(__wsum *csum, __wsum old, __wsum new) ++{ ++ *csum = csum_add(csum_sub(*csum, old), new); ++} ++ + struct sk_buff; + void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb, + __be32 from, __be32 to, bool pseudohdr); +@@ -150,16 +156,16 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb, + void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb, + __wsum diff, bool pseudohdr); + +-static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb, +- __be16 from, __be16 to, +- bool pseudohdr) ++static __always_inline ++void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb, ++ __be16 from, __be16 to, bool pseudohdr) + { + inet_proto_csum_replace4(sum, skb, (__force __be32)from, + (__force __be32)to, pseudohdr); + } + +-static inline __wsum remcsum_adjust(void *ptr, __wsum csum, +- int start, int offset) ++static __always_inline __wsum remcsum_adjust(void *ptr, __wsum csum, ++ int start, int offset) + { + __sum16 *psum = (__sum16 *)(ptr + offset); + __wsum delta; +@@ -175,7 +181,7 @@ static inline __wsum remcsum_adjust(void *ptr, __wsum csum, + return delta; + } + +-static inline void remcsum_unadjust(__sum16 *psum, __wsum delta) ++static __always_inline void remcsum_unadjust(__sum16 *psum, __wsum delta) + { + *psum = csum_fold(csum_sub(delta, (__force __wsum)*psum)); + } +diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h +index a0d9e0b47ab8f..1dbddde8364ab 100644 +--- a/include/net/netfilter/nf_tables.h ++++ b/include/net/netfilter/nf_tables.h +@@ -889,9 +889,9 @@ struct nft_expr_ops { + int (*offload)(struct nft_offload_ctx *ctx, + struct nft_flow_rule *flow, + const struct nft_expr *expr); ++ bool (*offload_action)(const struct nft_expr *expr); + void (*offload_stats)(struct nft_expr *expr, + const struct flow_stats *stats); +- u32 offload_flags; + const struct nft_expr_type *type; + void *data; + }; +diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h +index f9d95ff82df83..7971478439580 100644 +--- a/include/net/netfilter/nf_tables_offload.h ++++ b/include/net/netfilter/nf_tables_offload.h +@@ -67,8 +67,6 @@ struct nft_flow_rule { + struct flow_rule *rule; + }; + +-#define NFT_OFFLOAD_F_ACTION (1 << 0) +- + void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow, + enum flow_dissector_key_id addr_type); + +diff --git a/include/net/sock.h b/include/net/sock.h +index d47e9658da285..cd69595949614 100644 +--- a/include/net/sock.h ++++ b/include/net/sock.h +@@ -504,7 +504,7 @@ struct sock { + u16 sk_tsflags; + int sk_bind_phc; + u8 sk_shutdown; +- u32 sk_tskey; ++ atomic_t sk_tskey; + 
atomic_t sk_zckey; + + u8 sk_clockid; +@@ -2636,7 +2636,7 @@ static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags, + __sock_tx_timestamp(tsflags, tx_flags); + if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey && + tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK) +- *tskey = sk->sk_tskey++; ++ *tskey = atomic_inc_return(&sk->sk_tskey) - 1; + } + if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS))) + *tx_flags |= SKBTX_WIFI_STATUS; +@@ -2654,6 +2654,11 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags) + &skb_shinfo(skb)->tskey); + } + ++static inline bool sk_is_tcp(const struct sock *sk) ++{ ++ return sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP; ++} ++ + /** + * sk_eat_skb - Release a skb if it is no longer needed + * @sk: socket to eat this skb from +diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c +index d2ff8ba7ae58f..c9da250fee38c 100644 +--- a/kernel/bpf/btf.c ++++ b/kernel/bpf/btf.c +@@ -5564,12 +5564,53 @@ static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] = { + #endif + }; + ++/* Returns true if struct is composed of scalars, 4 levels of nesting allowed */ ++static bool __btf_type_is_scalar_struct(struct bpf_verifier_log *log, ++ const struct btf *btf, ++ const struct btf_type *t, int rec) ++{ ++ const struct btf_type *member_type; ++ const struct btf_member *member; ++ u32 i; ++ ++ if (!btf_type_is_struct(t)) ++ return false; ++ ++ for_each_member(i, t, member) { ++ const struct btf_array *array; ++ ++ member_type = btf_type_skip_modifiers(btf, member->type, NULL); ++ if (btf_type_is_struct(member_type)) { ++ if (rec >= 3) { ++ bpf_log(log, "max struct nesting depth exceeded\n"); ++ return false; ++ } ++ if (!__btf_type_is_scalar_struct(log, btf, member_type, rec + 1)) ++ return false; ++ continue; ++ } ++ if (btf_type_is_array(member_type)) { ++ array = btf_type_array(member_type); ++ if (!array->nelems) ++ return false; ++ member_type = btf_type_skip_modifiers(btf, array->type, NULL); ++ if (!btf_type_is_scalar(member_type)) ++ return false; ++ continue; ++ } ++ if (!btf_type_is_scalar(member_type)) ++ return false; ++ } ++ return true; ++} ++ + static int btf_check_func_arg_match(struct bpf_verifier_env *env, + const struct btf *btf, u32 func_id, + struct bpf_reg_state *regs, + bool ptr_to_mem_ok) + { + struct bpf_verifier_log *log = &env->log; ++ bool is_kfunc = btf_is_kernel(btf); + const char *func_name, *ref_tname; + const struct btf_type *t, *ref_t; + const struct btf_param *args; +@@ -5622,7 +5663,21 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, + + ref_t = btf_type_skip_modifiers(btf, t->type, &ref_id); + ref_tname = btf_name_by_offset(btf, ref_t->name_off); +- if (btf_is_kernel(btf)) { ++ if (btf_get_prog_ctx_type(log, btf, t, ++ env->prog->type, i)) { ++ /* If function expects ctx type in BTF check that caller ++ * is passing PTR_TO_CTX. 
++ */ ++ if (reg->type != PTR_TO_CTX) { ++ bpf_log(log, ++ "arg#%d expected pointer to ctx, but got %s\n", ++ i, btf_type_str(t)); ++ return -EINVAL; ++ } ++ if (check_ctx_reg(env, reg, regno)) ++ return -EINVAL; ++ } else if (is_kfunc && (reg->type == PTR_TO_BTF_ID || ++ (reg2btf_ids[base_type(reg->type)] && !type_flag(reg->type)))) { + const struct btf_type *reg_ref_t; + const struct btf *reg_btf; + const char *reg_ref_tname; +@@ -5638,14 +5693,9 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, + if (reg->type == PTR_TO_BTF_ID) { + reg_btf = reg->btf; + reg_ref_id = reg->btf_id; +- } else if (reg2btf_ids[reg->type]) { +- reg_btf = btf_vmlinux; +- reg_ref_id = *reg2btf_ids[reg->type]; + } else { +- bpf_log(log, "kernel function %s args#%d expected pointer to %s %s but R%d is not a pointer to btf_id\n", +- func_name, i, +- btf_type_str(ref_t), ref_tname, regno); +- return -EINVAL; ++ reg_btf = btf_vmlinux; ++ reg_ref_id = *reg2btf_ids[base_type(reg->type)]; + } + + reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id, +@@ -5661,23 +5711,24 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, + reg_ref_tname); + return -EINVAL; + } +- } else if (btf_get_prog_ctx_type(log, btf, t, +- env->prog->type, i)) { +- /* If function expects ctx type in BTF check that caller +- * is passing PTR_TO_CTX. +- */ +- if (reg->type != PTR_TO_CTX) { +- bpf_log(log, +- "arg#%d expected pointer to ctx, but got %s\n", +- i, btf_type_str(t)); +- return -EINVAL; +- } +- if (check_ctx_reg(env, reg, regno)) +- return -EINVAL; + } else if (ptr_to_mem_ok) { + const struct btf_type *resolve_ret; + u32 type_size; + ++ if (is_kfunc) { ++ /* Permit pointer to mem, but only when argument ++ * type is pointer to scalar, or struct composed ++ * (recursively) of scalars. ++ */ ++ if (!btf_type_is_scalar(ref_t) && ++ !__btf_type_is_scalar_struct(log, btf, ref_t, 0)) { ++ bpf_log(log, ++ "arg#%d pointer type %s %s must point to scalar or struct with scalar\n", ++ i, btf_type_str(ref_t), ref_tname); ++ return -EINVAL; ++ } ++ } ++ + resolve_ret = btf_resolve_size(btf, ref_t, &type_size); + if (IS_ERR(resolve_ret)) { + bpf_log(log, +@@ -5690,6 +5741,8 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, + if (check_mem_reg(env, reg, regno, type_size)) + return -EINVAL; + } else { ++ bpf_log(log, "reg type unsupported for arg#%d %sfunction %s#%d\n", i, ++ is_kfunc ? 
"kernel " : "", func_name, func_id); + return -EINVAL; + } + } +@@ -5739,7 +5792,7 @@ int btf_check_kfunc_arg_match(struct bpf_verifier_env *env, + const struct btf *btf, u32 func_id, + struct bpf_reg_state *regs) + { +- return btf_check_func_arg_match(env, btf, func_id, regs, false); ++ return btf_check_func_arg_match(env, btf, func_id, regs, true); + } + + /* Convert BTF of a function into bpf_reg_state if possible +diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c +index 4c6c2c2137458..d2914cb9b7d18 100644 +--- a/kernel/bpf/syscall.c ++++ b/kernel/bpf/syscall.c +@@ -1354,6 +1354,7 @@ int generic_map_delete_batch(struct bpf_map *map, + maybe_wait_bpf_programs(map); + if (err) + break; ++ cond_resched(); + } + if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp))) + err = -EFAULT; +@@ -1411,6 +1412,7 @@ int generic_map_update_batch(struct bpf_map *map, + + if (err) + break; ++ cond_resched(); + } + + if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp))) +@@ -1508,6 +1510,7 @@ int generic_map_lookup_batch(struct bpf_map *map, + swap(prev_key, key); + retry = MAP_LOOKUP_RETRIES; + cp++; ++ cond_resched(); + } + + if (err == -EFAULT) +diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c +index 0e877dbcfeea9..afc6c0e9c966e 100644 +--- a/kernel/cgroup/cgroup-v1.c ++++ b/kernel/cgroup/cgroup-v1.c +@@ -546,6 +546,7 @@ static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) + { + struct cgroup *cgrp; ++ struct cgroup_file_ctx *ctx; + + BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX); + +@@ -553,8 +554,9 @@ static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of, + * Release agent gets called with all capabilities, + * require capabilities to set release agent. 
+ */ +- if ((of->file->f_cred->user_ns != &init_user_ns) || +- !capable(CAP_SYS_ADMIN)) ++ ctx = of->priv; ++ if ((ctx->ns->user_ns != &init_user_ns) || ++ !file_ns_capable(of->file, &init_user_ns, CAP_SYS_ADMIN)) + return -EPERM; + + cgrp = cgroup_kn_lock_live(of->kn, false); +diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c +index d729cbd2445af..df62527f5e0b1 100644 +--- a/kernel/cgroup/cpuset.c ++++ b/kernel/cgroup/cpuset.c +@@ -2269,6 +2269,7 @@ static void cpuset_attach(struct cgroup_taskset *tset) + cgroup_taskset_first(tset, &css); + cs = css_cs(css); + ++ cpus_read_lock(); + percpu_down_write(&cpuset_rwsem); + + guarantee_online_mems(cs, &cpuset_attach_nodemask_to); +@@ -2322,6 +2323,7 @@ static void cpuset_attach(struct cgroup_taskset *tset) + wake_up(&cpuset_attach_wq); + + percpu_up_write(&cpuset_rwsem); ++ cpus_read_unlock(); + } + + /* The various types of files and directories in a cpuset file system */ +diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c +index 3d5c07239a2a8..67c7979c40c0b 100644 +--- a/kernel/trace/trace_events_trigger.c ++++ b/kernel/trace/trace_events_trigger.c +@@ -955,6 +955,16 @@ traceon_trigger(struct event_trigger_data *data, + struct trace_buffer *buffer, void *rec, + struct ring_buffer_event *event) + { ++ struct trace_event_file *file = data->private_data; ++ ++ if (file) { ++ if (tracer_tracing_is_on(file->tr)) ++ return; ++ ++ tracer_tracing_on(file->tr); ++ return; ++ } ++ + if (tracing_is_on()) + return; + +@@ -966,8 +976,15 @@ traceon_count_trigger(struct event_trigger_data *data, + struct trace_buffer *buffer, void *rec, + struct ring_buffer_event *event) + { +- if (tracing_is_on()) +- return; ++ struct trace_event_file *file = data->private_data; ++ ++ if (file) { ++ if (tracer_tracing_is_on(file->tr)) ++ return; ++ } else { ++ if (tracing_is_on()) ++ return; ++ } + + if (!data->count) + return; +@@ -975,7 +992,10 @@ traceon_count_trigger(struct event_trigger_data *data, + if (data->count != -1) + (data->count)--; + +- tracing_on(); ++ if (file) ++ tracer_tracing_on(file->tr); ++ else ++ tracing_on(); + } + + static void +@@ -983,6 +1003,16 @@ traceoff_trigger(struct event_trigger_data *data, + struct trace_buffer *buffer, void *rec, + struct ring_buffer_event *event) + { ++ struct trace_event_file *file = data->private_data; ++ ++ if (file) { ++ if (!tracer_tracing_is_on(file->tr)) ++ return; ++ ++ tracer_tracing_off(file->tr); ++ return; ++ } ++ + if (!tracing_is_on()) + return; + +@@ -994,8 +1024,15 @@ traceoff_count_trigger(struct event_trigger_data *data, + struct trace_buffer *buffer, void *rec, + struct ring_buffer_event *event) + { +- if (!tracing_is_on()) +- return; ++ struct trace_event_file *file = data->private_data; ++ ++ if (file) { ++ if (!tracer_tracing_is_on(file->tr)) ++ return; ++ } else { ++ if (!tracing_is_on()) ++ return; ++ } + + if (!data->count) + return; +@@ -1003,7 +1040,10 @@ traceoff_count_trigger(struct event_trigger_data *data, + if (data->count != -1) + (data->count)--; + +- tracing_off(); ++ if (file) ++ tracer_tracing_off(file->tr); ++ else ++ tracing_off(); + } + + static int +@@ -1200,7 +1240,12 @@ stacktrace_trigger(struct event_trigger_data *data, + struct trace_buffer *buffer, void *rec, + struct ring_buffer_event *event) + { +- trace_dump_stack(STACK_SKIP); ++ struct trace_event_file *file = data->private_data; ++ ++ if (file) ++ __trace_stack(file->tr, tracing_gen_ctx(), STACK_SKIP); ++ else ++ trace_dump_stack(STACK_SKIP); + } + + static void +diff 
--git a/mm/filemap.c b/mm/filemap.c +index 39c4c46c61337..56b437eb85547 100644 +--- a/mm/filemap.c ++++ b/mm/filemap.c +@@ -2365,8 +2365,12 @@ static void filemap_get_read_batch(struct address_space *mapping, + break; + if (PageReadahead(head)) + break; +- xas.xa_index = head->index + thp_nr_pages(head) - 1; +- xas.xa_offset = (xas.xa_index >> xas.xa_shift) & XA_CHUNK_MASK; ++ if (PageHead(head)) { ++ xas_set(&xas, head->index + thp_nr_pages(head)); ++ /* Handle wrap correctly */ ++ if (xas.xa_index - 1 >= max) ++ break; ++ } + continue; + put_page: + put_page(head); +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index a1baa198519a2..221239db6389a 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -4159,10 +4159,10 @@ static int __init hugepages_setup(char *s) + pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n"); + return 0; + } ++ if (tmp >= nr_online_nodes) ++ goto invalid; + node = tmp; + p += count + 1; +- if (node < 0 || node >= nr_online_nodes) +- goto invalid; + /* Parse hugepages */ + if (sscanf(p, "%lu%n", &tmp, &count) != 1) + goto invalid; +@@ -4851,14 +4851,13 @@ again: + } + + static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr, +- unsigned long new_addr, pte_t *src_pte) ++ unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte) + { + struct hstate *h = hstate_vma(vma); + struct mm_struct *mm = vma->vm_mm; +- pte_t *dst_pte, pte; + spinlock_t *src_ptl, *dst_ptl; ++ pte_t pte; + +- dst_pte = huge_pte_offset(mm, new_addr, huge_page_size(h)); + dst_ptl = huge_pte_lock(h, mm, dst_pte); + src_ptl = huge_pte_lockptr(h, mm, src_pte); + +@@ -4917,7 +4916,7 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma, + if (!dst_pte) + break; + +- move_huge_pte(vma, old_addr, new_addr, src_pte); ++ move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte); + } + flush_tlb_range(vma, old_end - len, old_end); + mmu_notifier_invalidate_range_end(&range); +diff --git a/mm/memblock.c b/mm/memblock.c +index 1018e50566f35..b12a364f2766f 100644 +--- a/mm/memblock.c ++++ b/mm/memblock.c +@@ -366,14 +366,20 @@ void __init memblock_discard(void) + addr = __pa(memblock.reserved.regions); + size = PAGE_ALIGN(sizeof(struct memblock_region) * + memblock.reserved.max); +- memblock_free_late(addr, size); ++ if (memblock_reserved_in_slab) ++ kfree(memblock.reserved.regions); ++ else ++ memblock_free_late(addr, size); + } + + if (memblock.memory.regions != memblock_memory_init_regions) { + addr = __pa(memblock.memory.regions); + size = PAGE_ALIGN(sizeof(struct memblock_region) * + memblock.memory.max); +- memblock_free_late(addr, size); ++ if (memblock_memory_in_slab) ++ kfree(memblock.memory.regions); ++ else ++ memblock_free_late(addr, size); + } + + memblock_memory = NULL; +diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c +index a271688780a2c..307ee1174a6e2 100644 +--- a/net/can/j1939/transport.c ++++ b/net/can/j1939/transport.c +@@ -2006,7 +2006,7 @@ struct j1939_session *j1939_tp_send(struct j1939_priv *priv, + /* set the end-packet for broadcast */ + session->pkt.last = session->pkt.total; + +- skcb->tskey = session->sk->sk_tskey++; ++ skcb->tskey = atomic_inc_return(&session->sk->sk_tskey) - 1; + session->tskey = skcb->tskey; + + return session; +diff --git a/net/core/filter.c b/net/core/filter.c +index 22bed067284fb..d4cdf11656b3f 100644 +--- a/net/core/filter.c ++++ b/net/core/filter.c +@@ -2711,6 +2711,9 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start, + if (unlikely(flags)) + return -EINVAL; + ++ if 
(unlikely(len == 0)) ++ return 0; ++ + /* First find the starting scatterlist element */ + i = msg->sg.start; + do { +diff --git a/net/core/skbuff.c b/net/core/skbuff.c +index 909db87d7383d..f78969d8d8160 100644 +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -2254,7 +2254,7 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta) + /* Free pulled out fragments. */ + while ((list = skb_shinfo(skb)->frag_list) != insp) { + skb_shinfo(skb)->frag_list = list->next; +- kfree_skb(list); ++ consume_skb(list); + } + /* And insert new clone at head. */ + if (clone) { +@@ -4849,9 +4849,8 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb, + serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0; + if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { + serr->ee.ee_data = skb_shinfo(skb)->tskey; +- if (sk->sk_protocol == IPPROTO_TCP && +- sk->sk_type == SOCK_STREAM) +- serr->ee.ee_data -= sk->sk_tskey; ++ if (sk_is_tcp(sk)) ++ serr->ee.ee_data -= atomic_read(&sk->sk_tskey); + } + + err = sock_queue_err_skb(sk, skb); +@@ -4919,8 +4918,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb, + if (tsonly) { + #ifdef CONFIG_INET + if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) && +- sk->sk_protocol == IPPROTO_TCP && +- sk->sk_type == SOCK_STREAM) { ++ sk_is_tcp(sk)) { + skb = tcp_get_timestamping_opt_stats(sk, orig_skb, + ack_skb); + opt_stats = true; +@@ -6227,7 +6225,7 @@ static int pskb_carve_frag_list(struct sk_buff *skb, + /* Free pulled out fragments. */ + while ((list = shinfo->frag_list) != insp) { + shinfo->frag_list = list->next; +- kfree_skb(list); ++ consume_skb(list); + } + /* And insert new clone at head. */ + if (clone) { +diff --git a/net/core/sock.c b/net/core/sock.c +index 7de234693a3bf..6613a864f7f5a 100644 +--- a/net/core/sock.c ++++ b/net/core/sock.c +@@ -874,14 +874,13 @@ int sock_set_timestamping(struct sock *sk, int optname, + + if (val & SOF_TIMESTAMPING_OPT_ID && + !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) { +- if (sk->sk_protocol == IPPROTO_TCP && +- sk->sk_type == SOCK_STREAM) { ++ if (sk_is_tcp(sk)) { + if ((1 << sk->sk_state) & + (TCPF_CLOSE | TCPF_LISTEN)) + return -EINVAL; +- sk->sk_tskey = tcp_sk(sk)->snd_una; ++ atomic_set(&sk->sk_tskey, tcp_sk(sk)->snd_una); + } else { +- sk->sk_tskey = 0; ++ atomic_set(&sk->sk_tskey, 0); + } + } + +@@ -1372,8 +1371,7 @@ set_sndbuf: + + case SO_ZEROCOPY: + if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) { +- if (!((sk->sk_type == SOCK_STREAM && +- sk->sk_protocol == IPPROTO_TCP) || ++ if (!(sk_is_tcp(sk) || + (sk->sk_type == SOCK_DGRAM && + sk->sk_protocol == IPPROTO_UDP))) + ret = -ENOTSUPP; +diff --git a/net/dsa/master.c b/net/dsa/master.c +index e8e19857621bd..b0ab3cbeff3ca 100644 +--- a/net/dsa/master.c ++++ b/net/dsa/master.c +@@ -260,11 +260,16 @@ static void dsa_netdev_ops_set(struct net_device *dev, + dev->dsa_ptr->netdev_ops = ops; + } + ++/* Keep the master always promiscuous if the tagging protocol requires that ++ * (garbles MAC DA) or if it doesn't support unicast filtering, case in which ++ * it would revert to promiscuous mode as soon as we call dev_uc_add() on it ++ * anyway. 
++ */ + static void dsa_master_set_promiscuity(struct net_device *dev, int inc) + { + const struct dsa_device_ops *ops = dev->dsa_ptr->tag_ops; + +- if (!ops->promisc_on_master) ++ if ((dev->priv_flags & IFF_UNICAST_FLT) && !ops->promisc_on_master) + return; + + rtnl_lock(); +diff --git a/net/dsa/port.c b/net/dsa/port.c +index f6f12ad2b5251..6cc353b77681f 100644 +--- a/net/dsa/port.c ++++ b/net/dsa/port.c +@@ -777,9 +777,15 @@ int dsa_port_host_fdb_add(struct dsa_port *dp, const unsigned char *addr, + struct dsa_port *cpu_dp = dp->cpu_dp; + int err; + +- err = dev_uc_add(cpu_dp->master, addr); +- if (err) +- return err; ++ /* Avoid a call to __dev_set_promiscuity() on the master, which ++ * requires rtnl_lock(), since we can't guarantee that is held here, ++ * and we can't take it either. ++ */ ++ if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) { ++ err = dev_uc_add(cpu_dp->master, addr); ++ if (err) ++ return err; ++ } + + return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info); + } +@@ -796,9 +802,11 @@ int dsa_port_host_fdb_del(struct dsa_port *dp, const unsigned char *addr, + struct dsa_port *cpu_dp = dp->cpu_dp; + int err; + +- err = dev_uc_del(cpu_dp->master, addr); +- if (err) +- return err; ++ if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) { ++ err = dev_uc_del(cpu_dp->master, addr); ++ if (err) ++ return err; ++ } + + return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info); + } +diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c +index 5f70ffdae1b52..43dd5dd176c24 100644 +--- a/net/ipv4/af_inet.c ++++ b/net/ipv4/af_inet.c +@@ -1376,8 +1376,11 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb, + } + + ops = rcu_dereference(inet_offloads[proto]); +- if (likely(ops && ops->callbacks.gso_segment)) ++ if (likely(ops && ops->callbacks.gso_segment)) { + segs = ops->callbacks.gso_segment(skb, features); ++ if (!segs) ++ skb->network_header = skb_mac_header(skb) + nhoff - skb->head; ++ } + + if (IS_ERR_OR_NULL(segs)) + goto out; +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c +index a4d2eb691cbc1..131066d0319a2 100644 +--- a/net/ipv4/ip_output.c ++++ b/net/ipv4/ip_output.c +@@ -992,7 +992,7 @@ static int __ip_append_data(struct sock *sk, + + if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP && + sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) +- tskey = sk->sk_tskey++; ++ tskey = atomic_inc_return(&sk->sk_tskey) - 1; + + hh_len = LL_RESERVED_SPACE(rt->dst.dev); + +diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c +index e3a159c8f231e..36e89b6873876 100644 +--- a/net/ipv4/ping.c ++++ b/net/ipv4/ping.c +@@ -187,7 +187,6 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident) + (int)ident, &ipv6_hdr(skb)->daddr, dif); + #endif + } else { +- pr_err("ping: protocol(%x) is not supported\n", ntohs(skb->protocol)); + return NULL; + } + +diff --git a/net/ipv4/udp_tunnel_nic.c b/net/ipv4/udp_tunnel_nic.c +index b91003538d87a..bc3a043a5d5c7 100644 +--- a/net/ipv4/udp_tunnel_nic.c ++++ b/net/ipv4/udp_tunnel_nic.c +@@ -846,7 +846,7 @@ udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn) + list_for_each_entry(node, &info->shared->devices, list) + if (node->dev == dev) + break; +- if (node->dev != dev) ++ if (list_entry_is_head(node, &info->shared->devices, list)) + return; + + list_del(&node->list); +diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c +index 1cbd49d5788dd..b2919a8e9c012 100644 +--- a/net/ipv6/ip6_offload.c ++++ b/net/ipv6/ip6_offload.c +@@ -114,6 +114,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff 
*skb, + if (likely(ops && ops->callbacks.gso_segment)) { + skb_reset_transport_header(skb); + segs = ops->callbacks.gso_segment(skb, features); ++ if (!segs) ++ skb->network_header = skb_mac_header(skb) + nhoff - skb->head; + } + + if (IS_ERR_OR_NULL(segs)) +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c +index ff4e83e2a5068..22bf8fb617165 100644 +--- a/net/ipv6/ip6_output.c ++++ b/net/ipv6/ip6_output.c +@@ -1465,7 +1465,7 @@ static int __ip6_append_data(struct sock *sk, + + if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP && + sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) +- tskey = sk->sk_tskey++; ++ tskey = atomic_inc_return(&sk->sk_tskey) - 1; + + hh_len = LL_RESERVED_SPACE(rt->dst.dev); + +diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c +index 3240b72271a7f..7558802a14350 100644 +--- a/net/mptcp/mib.c ++++ b/net/mptcp/mib.c +@@ -35,12 +35,14 @@ static const struct snmp_mib mptcp_snmp_list[] = { + SNMP_MIB_ITEM("AddAddr", MPTCP_MIB_ADDADDR), + SNMP_MIB_ITEM("EchoAdd", MPTCP_MIB_ECHOADD), + SNMP_MIB_ITEM("PortAdd", MPTCP_MIB_PORTADD), ++ SNMP_MIB_ITEM("AddAddrDrop", MPTCP_MIB_ADDADDRDROP), + SNMP_MIB_ITEM("MPJoinPortSynRx", MPTCP_MIB_JOINPORTSYNRX), + SNMP_MIB_ITEM("MPJoinPortSynAckRx", MPTCP_MIB_JOINPORTSYNACKRX), + SNMP_MIB_ITEM("MPJoinPortAckRx", MPTCP_MIB_JOINPORTACKRX), + SNMP_MIB_ITEM("MismatchPortSynRx", MPTCP_MIB_MISMATCHPORTSYNRX), + SNMP_MIB_ITEM("MismatchPortAckRx", MPTCP_MIB_MISMATCHPORTACKRX), + SNMP_MIB_ITEM("RmAddr", MPTCP_MIB_RMADDR), ++ SNMP_MIB_ITEM("RmAddrDrop", MPTCP_MIB_RMADDRDROP), + SNMP_MIB_ITEM("RmSubflow", MPTCP_MIB_RMSUBFLOW), + SNMP_MIB_ITEM("MPPrioTx", MPTCP_MIB_MPPRIOTX), + SNMP_MIB_ITEM("MPPrioRx", MPTCP_MIB_MPPRIORX), +diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h +index ecd3d8b117e0b..2966fcb6548ba 100644 +--- a/net/mptcp/mib.h ++++ b/net/mptcp/mib.h +@@ -28,12 +28,14 @@ enum linux_mptcp_mib_field { + MPTCP_MIB_ADDADDR, /* Received ADD_ADDR with echo-flag=0 */ + MPTCP_MIB_ECHOADD, /* Received ADD_ADDR with echo-flag=1 */ + MPTCP_MIB_PORTADD, /* Received ADD_ADDR with a port-number */ ++ MPTCP_MIB_ADDADDRDROP, /* Dropped incoming ADD_ADDR */ + MPTCP_MIB_JOINPORTSYNRX, /* Received a SYN MP_JOIN with a different port-number */ + MPTCP_MIB_JOINPORTSYNACKRX, /* Received a SYNACK MP_JOIN with a different port-number */ + MPTCP_MIB_JOINPORTACKRX, /* Received an ACK MP_JOIN with a different port-number */ + MPTCP_MIB_MISMATCHPORTSYNRX, /* Received a SYN MP_JOIN with a mismatched port-number */ + MPTCP_MIB_MISMATCHPORTACKRX, /* Received an ACK MP_JOIN with a mismatched port-number */ + MPTCP_MIB_RMADDR, /* Received RM_ADDR */ ++ MPTCP_MIB_RMADDRDROP, /* Dropped incoming RM_ADDR */ + MPTCP_MIB_RMSUBFLOW, /* Remove a subflow */ + MPTCP_MIB_MPPRIOTX, /* Transmit a MP_PRIO */ + MPTCP_MIB_MPPRIORX, /* Received a MP_PRIO */ +diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c +index 6ab386ff32944..d9790d6fbce9c 100644 +--- a/net/mptcp/pm.c ++++ b/net/mptcp/pm.c +@@ -194,6 +194,8 @@ void mptcp_pm_add_addr_received(struct mptcp_sock *msk, + mptcp_pm_add_addr_send_ack(msk); + } else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) { + pm->remote = *addr; ++ } else { ++ __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP); + } + + spin_unlock_bh(&pm->lock); +@@ -234,8 +236,10 @@ void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, + mptcp_event_addr_removed(msk, rm_list->ids[i]); + + spin_lock_bh(&pm->lock); +- mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED); +- pm->rm_list_rx = *rm_list; ++ if (mptcp_pm_schedule_work(msk, 
MPTCP_PM_RM_ADDR_RECEIVED)) ++ pm->rm_list_rx = *rm_list; ++ else ++ __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_RMADDRDROP); + spin_unlock_bh(&pm->lock); + } + +diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c +index 5eada95dd76b3..d57d507ef83f1 100644 +--- a/net/mptcp/pm_netlink.c ++++ b/net/mptcp/pm_netlink.c +@@ -606,6 +606,7 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk) + unsigned int add_addr_accept_max; + struct mptcp_addr_info remote; + unsigned int subflows_max; ++ bool reset_port = false; + int i, nr; + + add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk); +@@ -615,15 +616,19 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk) + msk->pm.add_addr_accepted, add_addr_accept_max, + msk->pm.remote.family); + +- if (lookup_subflow_by_daddr(&msk->conn_list, &msk->pm.remote)) ++ remote = msk->pm.remote; ++ if (lookup_subflow_by_daddr(&msk->conn_list, &remote)) + goto add_addr_echo; + ++ /* pick id 0 port, if none is provided the remote address */ ++ if (!remote.port) { ++ reset_port = true; ++ remote.port = sk->sk_dport; ++ } ++ + /* connect to the specified remote address, using whatever + * local address the routing configuration will pick. + */ +- remote = msk->pm.remote; +- if (!remote.port) +- remote.port = sk->sk_dport; + nr = fill_local_addresses_vec(msk, addrs); + + msk->pm.add_addr_accepted++; +@@ -636,8 +641,12 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk) + __mptcp_subflow_connect(sk, &addrs[i], &remote); + spin_lock_bh(&msk->pm.lock); + ++ /* be sure to echo exactly the received address */ ++ if (reset_port) ++ remote.port = 0; ++ + add_addr_echo: +- mptcp_pm_announce_addr(msk, &msk->pm.remote, true); ++ mptcp_pm_announce_addr(msk, &remote, true); + mptcp_pm_nl_addr_send_ack(msk); + } + +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c +index c207728226372..a65b530975f54 100644 +--- a/net/netfilter/nf_tables_api.c ++++ b/net/netfilter/nf_tables_api.c +@@ -6535,12 +6535,15 @@ static int nf_tables_updobj(const struct nft_ctx *ctx, + { + struct nft_object *newobj; + struct nft_trans *trans; +- int err; ++ int err = -ENOMEM; ++ ++ if (!try_module_get(type->owner)) ++ return -ENOENT; + + trans = nft_trans_alloc(ctx, NFT_MSG_NEWOBJ, + sizeof(struct nft_trans_obj)); + if (!trans) +- return -ENOMEM; ++ goto err_trans; + + newobj = nft_obj_init(ctx, type, attr); + if (IS_ERR(newobj)) { +@@ -6557,6 +6560,8 @@ static int nf_tables_updobj(const struct nft_ctx *ctx, + + err_free_trans: + kfree(trans); ++err_trans: ++ module_put(type->owner); + return err; + } + +@@ -8169,7 +8174,7 @@ static void nft_obj_commit_update(struct nft_trans *trans) + if (obj->ops->update) + obj->ops->update(obj, newobj); + +- kfree(newobj); ++ nft_obj_destroy(&trans->ctx, newobj); + } + + static void nft_commit_release(struct nft_trans *trans) +@@ -8914,7 +8919,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) + break; + case NFT_MSG_NEWOBJ: + if (nft_trans_obj_update(trans)) { +- kfree(nft_trans_obj_newobj(trans)); ++ nft_obj_destroy(&trans->ctx, nft_trans_obj_newobj(trans)); + nft_trans_destroy(trans); + } else { + trans->ctx.table->use--; +@@ -9574,10 +9579,13 @@ EXPORT_SYMBOL_GPL(__nft_release_basechain); + + static void __nft_release_hook(struct net *net, struct nft_table *table) + { ++ struct nft_flowtable *flowtable; + struct nft_chain *chain; + + list_for_each_entry(chain, &table->chains, list) + nf_tables_unregister_hook(net, table, chain); ++ 
list_for_each_entry(flowtable, &table->flowtables, list) ++ nft_unregister_flowtable_net_hooks(net, &flowtable->hook_list); + } + + static void __nft_release_hooks(struct net *net) +diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c +index 9656c16462222..2d36952b13920 100644 +--- a/net/netfilter/nf_tables_offload.c ++++ b/net/netfilter/nf_tables_offload.c +@@ -94,7 +94,8 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net, + + expr = nft_expr_first(rule); + while (nft_expr_more(rule, expr)) { +- if (expr->ops->offload_flags & NFT_OFFLOAD_F_ACTION) ++ if (expr->ops->offload_action && ++ expr->ops->offload_action(expr)) + num_actions++; + + expr = nft_expr_next(expr); +diff --git a/net/netfilter/nft_dup_netdev.c b/net/netfilter/nft_dup_netdev.c +index bbf3fcba3df40..5b5c607fbf83f 100644 +--- a/net/netfilter/nft_dup_netdev.c ++++ b/net/netfilter/nft_dup_netdev.c +@@ -67,6 +67,11 @@ static int nft_dup_netdev_offload(struct nft_offload_ctx *ctx, + return nft_fwd_dup_netdev_offload(ctx, flow, FLOW_ACTION_MIRRED, oif); + } + ++static bool nft_dup_netdev_offload_action(const struct nft_expr *expr) ++{ ++ return true; ++} ++ + static struct nft_expr_type nft_dup_netdev_type; + static const struct nft_expr_ops nft_dup_netdev_ops = { + .type = &nft_dup_netdev_type, +@@ -75,6 +80,7 @@ static const struct nft_expr_ops nft_dup_netdev_ops = { + .init = nft_dup_netdev_init, + .dump = nft_dup_netdev_dump, + .offload = nft_dup_netdev_offload, ++ .offload_action = nft_dup_netdev_offload_action, + }; + + static struct nft_expr_type nft_dup_netdev_type __read_mostly = { +diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c +index cd59afde5b2f8..7730409f6f091 100644 +--- a/net/netfilter/nft_fwd_netdev.c ++++ b/net/netfilter/nft_fwd_netdev.c +@@ -77,6 +77,11 @@ static int nft_fwd_netdev_offload(struct nft_offload_ctx *ctx, + return nft_fwd_dup_netdev_offload(ctx, flow, FLOW_ACTION_REDIRECT, oif); + } + ++static bool nft_fwd_netdev_offload_action(const struct nft_expr *expr) ++{ ++ return true; ++} ++ + struct nft_fwd_neigh { + u8 sreg_dev; + u8 sreg_addr; +@@ -219,6 +224,7 @@ static const struct nft_expr_ops nft_fwd_netdev_ops = { + .dump = nft_fwd_netdev_dump, + .validate = nft_fwd_validate, + .offload = nft_fwd_netdev_offload, ++ .offload_action = nft_fwd_netdev_offload_action, + }; + + static const struct nft_expr_ops * +diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c +index 90c64d27ae532..d0f67d325bdfd 100644 +--- a/net/netfilter/nft_immediate.c ++++ b/net/netfilter/nft_immediate.c +@@ -213,6 +213,16 @@ static int nft_immediate_offload(struct nft_offload_ctx *ctx, + return 0; + } + ++static bool nft_immediate_offload_action(const struct nft_expr *expr) ++{ ++ const struct nft_immediate_expr *priv = nft_expr_priv(expr); ++ ++ if (priv->dreg == NFT_REG_VERDICT) ++ return true; ++ ++ return false; ++} ++ + static const struct nft_expr_ops nft_imm_ops = { + .type = &nft_imm_type, + .size = NFT_EXPR_SIZE(sizeof(struct nft_immediate_expr)), +@@ -224,7 +234,7 @@ static const struct nft_expr_ops nft_imm_ops = { + .dump = nft_immediate_dump, + .validate = nft_immediate_validate, + .offload = nft_immediate_offload, +- .offload_flags = NFT_OFFLOAD_F_ACTION, ++ .offload_action = nft_immediate_offload_action, + }; + + struct nft_expr_type nft_imm_type __read_mostly = { +diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c +index 5e6459e116055..7013f55f05d1e 100644 +--- a/net/netfilter/xt_socket.c ++++ 
b/net/netfilter/xt_socket.c +@@ -220,8 +220,10 @@ static void socket_mt_destroy(const struct xt_mtdtor_param *par) + { + if (par->family == NFPROTO_IPV4) + nf_defrag_ipv4_disable(par->net); ++#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) + else if (par->family == NFPROTO_IPV6) +- nf_defrag_ipv4_disable(par->net); ++ nf_defrag_ipv6_disable(par->net); ++#endif + } + + static struct xt_match socket_mt_reg[] __read_mostly = { +diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c +index 076774034bb96..780d9e2246f39 100644 +--- a/net/openvswitch/actions.c ++++ b/net/openvswitch/actions.c +@@ -423,12 +423,43 @@ static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto, + memcpy(addr, new_addr, sizeof(__be32[4])); + } + +-static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask) ++static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh, u8 ipv6_tclass, u8 mask) + { ++ u8 old_ipv6_tclass = ipv6_get_dsfield(nh); ++ ++ ipv6_tclass = OVS_MASKED(old_ipv6_tclass, ipv6_tclass, mask); ++ ++ if (skb->ip_summed == CHECKSUM_COMPLETE) ++ csum_replace(&skb->csum, (__force __wsum)(old_ipv6_tclass << 12), ++ (__force __wsum)(ipv6_tclass << 12)); ++ ++ ipv6_change_dsfield(nh, ~mask, ipv6_tclass); ++} ++ ++static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl, u32 mask) ++{ ++ u32 ofl; ++ ++ ofl = nh->flow_lbl[0] << 16 | nh->flow_lbl[1] << 8 | nh->flow_lbl[2]; ++ fl = OVS_MASKED(ofl, fl, mask); ++ + /* Bits 21-24 are always unmasked, so this retains their values. */ +- OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16)); +- OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8)); +- OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask); ++ nh->flow_lbl[0] = (u8)(fl >> 16); ++ nh->flow_lbl[1] = (u8)(fl >> 8); ++ nh->flow_lbl[2] = (u8)fl; ++ ++ if (skb->ip_summed == CHECKSUM_COMPLETE) ++ csum_replace(&skb->csum, (__force __wsum)htonl(ofl), (__force __wsum)htonl(fl)); ++} ++ ++static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl, u8 mask) ++{ ++ new_ttl = OVS_MASKED(nh->hop_limit, new_ttl, mask); ++ ++ if (skb->ip_summed == CHECKSUM_COMPLETE) ++ csum_replace(&skb->csum, (__force __wsum)(nh->hop_limit << 8), ++ (__force __wsum)(new_ttl << 8)); ++ nh->hop_limit = new_ttl; + } + + static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl, +@@ -546,18 +577,17 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key, + } + } + if (mask->ipv6_tclass) { +- ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass); ++ set_ipv6_dsfield(skb, nh, key->ipv6_tclass, mask->ipv6_tclass); + flow_key->ip.tos = ipv6_get_dsfield(nh); + } + if (mask->ipv6_label) { +- set_ipv6_fl(nh, ntohl(key->ipv6_label), ++ set_ipv6_fl(skb, nh, ntohl(key->ipv6_label), + ntohl(mask->ipv6_label)); + flow_key->ipv6.label = + *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL); + } + if (mask->ipv6_hlimit) { +- OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit, +- mask->ipv6_hlimit); ++ set_ipv6_ttl(skb, nh, key->ipv6_hlimit, mask->ipv6_hlimit); + flow_key->ip.ttl = nh->hop_limit; + } + return 0; +diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c +index 2a17eb77c9049..4ffea1290ce1c 100644 +--- a/net/sched/act_ct.c ++++ b/net/sched/act_ct.c +@@ -516,11 +516,6 @@ static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p, + struct nf_conn *ct; + u8 dir; + +- /* Previously seen or loopback */ +- ct = nf_ct_get(skb, &ctinfo); +- if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED) +- return false; +- + switch (family) { + case 
NFPROTO_IPV4: + if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph)) +diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c +index 67e9d9fde0854..756b4dbadf36d 100644 +--- a/net/smc/smc_pnet.c ++++ b/net/smc/smc_pnet.c +@@ -112,7 +112,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name) + pnettable = &sn->pnettable; + + /* remove table entry */ +- write_lock(&pnettable->lock); ++ mutex_lock(&pnettable->lock); + list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, + list) { + if (!pnet_name || +@@ -130,7 +130,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name) + rc = 0; + } + } +- write_unlock(&pnettable->lock); ++ mutex_unlock(&pnettable->lock); + + /* if this is not the initial namespace, stop here */ + if (net != &init_net) +@@ -191,7 +191,7 @@ static int smc_pnet_add_by_ndev(struct net_device *ndev) + sn = net_generic(net, smc_net_id); + pnettable = &sn->pnettable; + +- write_lock(&pnettable->lock); ++ mutex_lock(&pnettable->lock); + list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) { + if (pnetelem->type == SMC_PNET_ETH && !pnetelem->ndev && + !strncmp(pnetelem->eth_name, ndev->name, IFNAMSIZ)) { +@@ -205,7 +205,7 @@ static int smc_pnet_add_by_ndev(struct net_device *ndev) + break; + } + } +- write_unlock(&pnettable->lock); ++ mutex_unlock(&pnettable->lock); + return rc; + } + +@@ -223,7 +223,7 @@ static int smc_pnet_remove_by_ndev(struct net_device *ndev) + sn = net_generic(net, smc_net_id); + pnettable = &sn->pnettable; + +- write_lock(&pnettable->lock); ++ mutex_lock(&pnettable->lock); + list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) { + if (pnetelem->type == SMC_PNET_ETH && pnetelem->ndev == ndev) { + dev_put(pnetelem->ndev); +@@ -236,7 +236,7 @@ static int smc_pnet_remove_by_ndev(struct net_device *ndev) + break; + } + } +- write_unlock(&pnettable->lock); ++ mutex_unlock(&pnettable->lock); + return rc; + } + +@@ -371,7 +371,7 @@ static int smc_pnet_add_eth(struct smc_pnettable *pnettable, struct net *net, + + rc = -EEXIST; + new_netdev = true; +- write_lock(&pnettable->lock); ++ mutex_lock(&pnettable->lock); + list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) { + if (tmp_pe->type == SMC_PNET_ETH && + !strncmp(tmp_pe->eth_name, eth_name, IFNAMSIZ)) { +@@ -381,9 +381,9 @@ static int smc_pnet_add_eth(struct smc_pnettable *pnettable, struct net *net, + } + if (new_netdev) { + list_add_tail(&new_pe->list, &pnettable->pnetlist); +- write_unlock(&pnettable->lock); ++ mutex_unlock(&pnettable->lock); + } else { +- write_unlock(&pnettable->lock); ++ mutex_unlock(&pnettable->lock); + kfree(new_pe); + goto out_put; + } +@@ -444,7 +444,7 @@ static int smc_pnet_add_ib(struct smc_pnettable *pnettable, char *ib_name, + new_pe->ib_port = ib_port; + + new_ibdev = true; +- write_lock(&pnettable->lock); ++ mutex_lock(&pnettable->lock); + list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) { + if (tmp_pe->type == SMC_PNET_IB && + !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX)) { +@@ -454,9 +454,9 @@ static int smc_pnet_add_ib(struct smc_pnettable *pnettable, char *ib_name, + } + if (new_ibdev) { + list_add_tail(&new_pe->list, &pnettable->pnetlist); +- write_unlock(&pnettable->lock); ++ mutex_unlock(&pnettable->lock); + } else { +- write_unlock(&pnettable->lock); ++ mutex_unlock(&pnettable->lock); + kfree(new_pe); + } + return (new_ibdev) ? 
0 : -EEXIST; +@@ -601,7 +601,7 @@ static int _smc_pnet_dump(struct net *net, struct sk_buff *skb, u32 portid, + pnettable = &sn->pnettable; + + /* dump pnettable entries */ +- read_lock(&pnettable->lock); ++ mutex_lock(&pnettable->lock); + list_for_each_entry(pnetelem, &pnettable->pnetlist, list) { + if (pnetid && !smc_pnet_match(pnetelem->pnet_name, pnetid)) + continue; +@@ -616,7 +616,7 @@ static int _smc_pnet_dump(struct net *net, struct sk_buff *skb, u32 portid, + break; + } + } +- read_unlock(&pnettable->lock); ++ mutex_unlock(&pnettable->lock); + return idx; + } + +@@ -860,7 +860,7 @@ int smc_pnet_net_init(struct net *net) + struct smc_pnetids_ndev *pnetids_ndev = &sn->pnetids_ndev; + + INIT_LIST_HEAD(&pnettable->pnetlist); +- rwlock_init(&pnettable->lock); ++ mutex_init(&pnettable->lock); + INIT_LIST_HEAD(&pnetids_ndev->list); + rwlock_init(&pnetids_ndev->lock); + +@@ -940,7 +940,7 @@ static int smc_pnet_find_ndev_pnetid_by_table(struct net_device *ndev, + sn = net_generic(net, smc_net_id); + pnettable = &sn->pnettable; + +- read_lock(&pnettable->lock); ++ mutex_lock(&pnettable->lock); + list_for_each_entry(pnetelem, &pnettable->pnetlist, list) { + if (pnetelem->type == SMC_PNET_ETH && ndev == pnetelem->ndev) { + /* get pnetid of netdev device */ +@@ -949,7 +949,7 @@ static int smc_pnet_find_ndev_pnetid_by_table(struct net_device *ndev, + break; + } + } +- read_unlock(&pnettable->lock); ++ mutex_unlock(&pnettable->lock); + return rc; + } + +@@ -1141,7 +1141,7 @@ int smc_pnetid_by_table_ib(struct smc_ib_device *smcibdev, u8 ib_port) + sn = net_generic(&init_net, smc_net_id); + pnettable = &sn->pnettable; + +- read_lock(&pnettable->lock); ++ mutex_lock(&pnettable->lock); + list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) { + if (tmp_pe->type == SMC_PNET_IB && + !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX) && +@@ -1151,7 +1151,7 @@ int smc_pnetid_by_table_ib(struct smc_ib_device *smcibdev, u8 ib_port) + break; + } + } +- read_unlock(&pnettable->lock); ++ mutex_unlock(&pnettable->lock); + + return rc; + } +@@ -1170,7 +1170,7 @@ int smc_pnetid_by_table_smcd(struct smcd_dev *smcddev) + sn = net_generic(&init_net, smc_net_id); + pnettable = &sn->pnettable; + +- read_lock(&pnettable->lock); ++ mutex_lock(&pnettable->lock); + list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) { + if (tmp_pe->type == SMC_PNET_IB && + !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX)) { +@@ -1179,7 +1179,7 @@ int smc_pnetid_by_table_smcd(struct smcd_dev *smcddev) + break; + } + } +- read_unlock(&pnettable->lock); ++ mutex_unlock(&pnettable->lock); + + return rc; + } +diff --git a/net/smc/smc_pnet.h b/net/smc/smc_pnet.h +index 14039272f7e42..80a88eea49491 100644 +--- a/net/smc/smc_pnet.h ++++ b/net/smc/smc_pnet.h +@@ -29,7 +29,7 @@ struct smc_link_group; + * @pnetlist: List of PNETIDs + */ + struct smc_pnettable { +- rwlock_t lock; ++ struct mutex lock; + struct list_head pnetlist; + }; + +diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c +index 01396dd1c899b..1d8ba233d0474 100644 +--- a/net/tipc/name_table.c ++++ b/net/tipc/name_table.c +@@ -967,7 +967,7 @@ static int __tipc_nl_add_nametable_publ(struct tipc_nl_msg *msg, + list_for_each_entry(p, &sr->all_publ, all_publ) + if (p->key == *last_key) + break; +- if (p->key != *last_key) ++ if (list_entry_is_head(p, &sr->all_publ, all_publ)) + return -EPIPE; + } else { + p = list_first_entry(&sr->all_publ, +diff --git a/net/tipc/socket.c b/net/tipc/socket.c +index 3e63c83e641c5..7545321c3440b 100644 +--- a/net/tipc/socket.c ++++ 
b/net/tipc/socket.c +@@ -3749,7 +3749,7 @@ static int __tipc_nl_list_sk_publ(struct sk_buff *skb, + if (p->key == *last_publ) + break; + } +- if (p->key != *last_publ) { ++ if (list_entry_is_head(p, &tsk->publications, binding_sock)) { + /* We never set seq or call nl_dump_check_consistent() + * this means that setting prev_seq here will cause the + * consistence check to fail in the netlink callback +diff --git a/security/selinux/ima.c b/security/selinux/ima.c +index 727c4e43219d7..ff7aea6b3774a 100644 +--- a/security/selinux/ima.c ++++ b/security/selinux/ima.c +@@ -77,7 +77,7 @@ void selinux_ima_measure_state_locked(struct selinux_state *state) + size_t policy_len; + int rc = 0; + +- WARN_ON(!mutex_is_locked(&state->policy_mutex)); ++ lockdep_assert_held(&state->policy_mutex); + + state_str = selinux_ima_collect_state(state); + if (!state_str) { +@@ -117,7 +117,7 @@ void selinux_ima_measure_state_locked(struct selinux_state *state) + */ + void selinux_ima_measure_state(struct selinux_state *state) + { +- WARN_ON(mutex_is_locked(&state->policy_mutex)); ++ lockdep_assert_not_held(&state->policy_mutex); + + mutex_lock(&state->policy_mutex); + selinux_ima_measure_state_locked(state); +diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c +index f5d260b1df4d1..15a4547d608ec 100644 +--- a/tools/perf/util/data.c ++++ b/tools/perf/util/data.c +@@ -44,10 +44,6 @@ int perf_data__create_dir(struct perf_data *data, int nr) + if (!files) + return -ENOMEM; + +- data->dir.version = PERF_DIR_VERSION; +- data->dir.files = files; +- data->dir.nr = nr; +- + for (i = 0; i < nr; i++) { + struct perf_data_file *file = &files[i]; + +@@ -62,6 +58,9 @@ int perf_data__create_dir(struct perf_data *data, int nr) + file->fd = ret; + } + ++ data->dir.version = PERF_DIR_VERSION; ++ data->dir.files = files; ++ data->dir.nr = nr; + return 0; + + out_err: +diff --git a/tools/perf/util/evlist-hybrid.c b/tools/perf/util/evlist-hybrid.c +index 7c554234b43d4..f39c8ffc5a111 100644 +--- a/tools/perf/util/evlist-hybrid.c ++++ b/tools/perf/util/evlist-hybrid.c +@@ -153,8 +153,8 @@ int evlist__fix_hybrid_cpus(struct evlist *evlist, const char *cpu_list) + perf_cpu_map__put(matched_cpus); + perf_cpu_map__put(unmatched_cpus); + } +- +- ret = (unmatched_count == events_nr) ? -1 : 0; ++ if (events_nr) ++ ret = (unmatched_count == events_nr) ? 
-1 : 0; + out: + perf_cpu_map__put(cpus); + return ret; +diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h +index 2966564b8497a..6c85b00f27b2e 100644 +--- a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h ++++ b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h +@@ -235,7 +235,7 @@ SEC("sk_msg1") + int bpf_prog4(struct sk_msg_md *msg) + { + int *bytes, zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5; +- int *start, *end, *start_push, *end_push, *start_pop, *pop; ++ int *start, *end, *start_push, *end_push, *start_pop, *pop, err = 0; + + bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero); + if (bytes) +@@ -249,8 +249,11 @@ int bpf_prog4(struct sk_msg_md *msg) + bpf_msg_pull_data(msg, *start, *end, 0); + start_push = bpf_map_lookup_elem(&sock_bytes, &two); + end_push = bpf_map_lookup_elem(&sock_bytes, &three); +- if (start_push && end_push) +- bpf_msg_push_data(msg, *start_push, *end_push, 0); ++ if (start_push && end_push) { ++ err = bpf_msg_push_data(msg, *start_push, *end_push, 0); ++ if (err) ++ return SK_DROP; ++ } + start_pop = bpf_map_lookup_elem(&sock_bytes, &four); + pop = bpf_map_lookup_elem(&sock_bytes, &five); + if (start_pop && pop) +@@ -263,6 +266,7 @@ int bpf_prog6(struct sk_msg_md *msg) + { + int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, key = 0; + int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop, *f; ++ int err = 0; + __u64 flags = 0; + + bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero); +@@ -279,8 +283,11 @@ int bpf_prog6(struct sk_msg_md *msg) + + start_push = bpf_map_lookup_elem(&sock_bytes, &two); + end_push = bpf_map_lookup_elem(&sock_bytes, &three); +- if (start_push && end_push) +- bpf_msg_push_data(msg, *start_push, *end_push, 0); ++ if (start_push && end_push) { ++ err = bpf_msg_push_data(msg, *start_push, *end_push, 0); ++ if (err) ++ return SK_DROP; ++ } + + start_pop = bpf_map_lookup_elem(&sock_bytes, &four); + pop = bpf_map_lookup_elem(&sock_bytes, &five); +@@ -338,7 +345,7 @@ SEC("sk_msg5") + int bpf_prog10(struct sk_msg_md *msg) + { + int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop; +- int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5; ++ int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, err = 0; + + bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero); + if (bytes) +@@ -352,8 +359,11 @@ int bpf_prog10(struct sk_msg_md *msg) + bpf_msg_pull_data(msg, *start, *end, 0); + start_push = bpf_map_lookup_elem(&sock_bytes, &two); + end_push = bpf_map_lookup_elem(&sock_bytes, &three); +- if (start_push && end_push) +- bpf_msg_push_data(msg, *start_push, *end_push, 0); ++ if (start_push && end_push) { ++ err = bpf_msg_push_data(msg, *start_push, *end_push, 0); ++ if (err) ++ return SK_PASS; ++ } + start_pop = bpf_map_lookup_elem(&sock_bytes, &four); + pop = bpf_map_lookup_elem(&sock_bytes, &five); + if (start_pop && pop) +diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh +index 2674ba20d5249..ff821025d3096 100755 +--- a/tools/testing/selftests/net/mptcp/diag.sh ++++ b/tools/testing/selftests/net/mptcp/diag.sh +@@ -71,6 +71,36 @@ chk_msk_remote_key_nr() + __chk_nr "grep -c remote_key" $* + } + ++# $1: ns, $2: port ++wait_local_port_listen() ++{ ++ local listener_ns="${1}" ++ local port="${2}" ++ ++ local port_hex i ++ ++ port_hex="$(printf "%04X" "${port}")" ++ for i in $(seq 10); do ++ ip netns exec "${listener_ns}" cat /proc/net/tcp | \ ++ awk 
"BEGIN {rc=1} {if (\$2 ~ /:${port_hex}\$/ && \$4 ~ /0A/) {rc=0; exit}} END {exit rc}" && ++ break ++ sleep 0.1 ++ done ++} ++ ++wait_connected() ++{ ++ local listener_ns="${1}" ++ local port="${2}" ++ ++ local port_hex i ++ ++ port_hex="$(printf "%04X" "${port}")" ++ for i in $(seq 10); do ++ ip netns exec ${listener_ns} grep -q " 0100007F:${port_hex} " /proc/net/tcp && break ++ sleep 0.1 ++ done ++} + + trap cleanup EXIT + ip netns add $ns +@@ -81,15 +111,15 @@ echo "a" | \ + ip netns exec $ns \ + ./mptcp_connect -p 10000 -l -t ${timeout_poll} \ + 0.0.0.0 >/dev/null & +-sleep 0.1 ++wait_local_port_listen $ns 10000 + chk_msk_nr 0 "no msk on netns creation" + + echo "b" | \ + timeout ${timeout_test} \ + ip netns exec $ns \ +- ./mptcp_connect -p 10000 -j -t ${timeout_poll} \ ++ ./mptcp_connect -p 10000 -r 0 -t ${timeout_poll} \ + 127.0.0.1 >/dev/null & +-sleep 0.1 ++wait_connected $ns 10000 + chk_msk_nr 2 "after MPC handshake " + chk_msk_remote_key_nr 2 "....chk remote_key" + chk_msk_fallback_nr 0 "....chk no fallback" +@@ -101,13 +131,13 @@ echo "a" | \ + ip netns exec $ns \ + ./mptcp_connect -p 10001 -l -s TCP -t ${timeout_poll} \ + 0.0.0.0 >/dev/null & +-sleep 0.1 ++wait_local_port_listen $ns 10001 + echo "b" | \ + timeout ${timeout_test} \ + ip netns exec $ns \ +- ./mptcp_connect -p 10001 -j -t ${timeout_poll} \ ++ ./mptcp_connect -p 10001 -r 0 -t ${timeout_poll} \ + 127.0.0.1 >/dev/null & +-sleep 0.1 ++wait_connected $ns 10001 + chk_msk_fallback_nr 1 "check fallback" + flush_pids + +@@ -119,7 +149,7 @@ for I in `seq 1 $NR_CLIENTS`; do + ./mptcp_connect -p $((I+10001)) -l -w 10 \ + -t ${timeout_poll} 0.0.0.0 >/dev/null & + done +-sleep 0.1 ++wait_local_port_listen $ns $((NR_CLIENTS + 10001)) + + for I in `seq 1 $NR_CLIENTS`; do + echo "b" | \ +diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh +index f06dc9dfe15eb..f4f0e3eb3b921 100755 +--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh ++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh +@@ -624,6 +624,7 @@ chk_join_nr() + local ack_nr=$4 + local count + local dump_stats ++ local with_cookie + + printf "%02u %-36s %s" "$TEST_COUNT" "$msg" "syn" + count=`ip netns exec $ns1 nstat -as | grep MPTcpExtMPJoinSynRx | awk '{print $2}'` +@@ -637,12 +638,20 @@ chk_join_nr() + fi + + echo -n " - synack" ++ with_cookie=`ip netns exec $ns2 sysctl -n net.ipv4.tcp_syncookies` + count=`ip netns exec $ns2 nstat -as | grep MPTcpExtMPJoinSynAckRx | awk '{print $2}'` + [ -z "$count" ] && count=0 + if [ "$count" != "$syn_ack_nr" ]; then +- echo "[fail] got $count JOIN[s] synack expected $syn_ack_nr" +- ret=1 +- dump_stats=1 ++ # simult connections exceeding the limit with cookie enabled could go up to ++ # synack validation as the conn limit can be enforced reliably only after ++ # the subflow creation ++ if [ "$with_cookie" = 2 ] && [ "$count" -gt "$syn_ack_nr" ] && [ "$count" -le "$syn_nr" ]; then ++ echo -n "[ ok ]" ++ else ++ echo "[fail] got $count JOIN[s] synack expected $syn_ack_nr" ++ ret=1 ++ dump_stats=1 ++ fi + else + echo -n "[ ok ]" + fi -- cgit v1.2.3-65-gdbad