summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMike Pagano <mpagano@gentoo.org>2024-02-16 14:00:20 -0500
committerMike Pagano <mpagano@gentoo.org>2024-02-16 14:00:20 -0500
commit09ae4eccbe9b346bb7300a09df69bfb5495388e5 (patch)
treeeb7d5eaac979c4a23170b2245f2958149975b5ba
parentLinux patch 6.1.77 (diff)
downloadlinux-patches-09ae4eccbe9b346bb7300a09df69bfb5495388e5.tar.gz
linux-patches-09ae4eccbe9b346bb7300a09df69bfb5495388e5.tar.bz2
linux-patches-09ae4eccbe9b346bb7300a09df69bfb5495388e5.zip
Linux patch 6.1.78
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--0000_README4
-rw-r--r--1077_linux-6.1.78.patch2252
2 files changed, 2256 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 264766b5..7d732a84 100644
--- a/0000_README
+++ b/0000_README
@@ -351,6 +351,10 @@ Patch: 1076_linux-6.1.77.patch
From: https://www.kernel.org
Desc: Linux 6.1.77
+Patch: 1077_linux-6.1.78.patch
+From: https://www.kernel.org
+Desc: Linux 6.1.78
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1077_linux-6.1.78.patch b/1077_linux-6.1.78.patch
new file mode 100644
index 00000000..d0f0c097
--- /dev/null
+++ b/1077_linux-6.1.78.patch
@@ -0,0 +1,2252 @@
+diff --git a/Makefile b/Makefile
+index f5598d90093f5..e93554269e474 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 77
++SUBLEVEL = 78
+ EXTRAVERSION =
+ NAME = Curry Ramen
+
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 6eaf2b0ad7cca..aefdf07bdc2cf 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -864,7 +864,16 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
+ */
+ blk_flush_plug(current->plug, false);
+
+- if (bio_queue_enter(bio))
++ /*
++ * We need to be able to enter a frozen queue, similar to how
++ * timeouts also need to do that. If that is blocked, then we can
++ * have pending IO when a queue freeze is started, and then the
++ * wait for the freeze to finish will wait for polled requests to
++ * timeout as the poller is preventer from entering the queue and
++ * completing them. As long as we prevent new IO from being queued,
++ * that should be all that matters.
++ */
++ if (!percpu_ref_tryget(&q->q_usage_counter))
+ return 0;
+ if (queue_is_mq(q)) {
+ ret = blk_mq_poll(q, cookie, iob, flags);
+diff --git a/block/blk-iocost.c b/block/blk-iocost.c
+index 7dd6a33e1d6a8..e6557024e3da8 100644
+--- a/block/blk-iocost.c
++++ b/block/blk-iocost.c
+@@ -1337,6 +1337,13 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
+
+ lockdep_assert_held(&iocg->waitq.lock);
+
++ /*
++ * If the delay is set by another CPU, we may be in the past. No need to
++ * change anything if so. This avoids decay calculation underflow.
++ */
++ if (time_before64(now->now, iocg->delay_at))
++ return false;
++
+ /* calculate the current delay in effect - 1/2 every second */
+ tdelta = now->now - iocg->delay_at;
+ if (iocg->delay)
+diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
+index 49cb4537344aa..2daf50d4cd47a 100644
+--- a/drivers/atm/idt77252.c
++++ b/drivers/atm/idt77252.c
+@@ -2930,6 +2930,8 @@ open_card_ubr0(struct idt77252_dev *card)
+ vc->scq = alloc_scq(card, vc->class);
+ if (!vc->scq) {
+ printk("%s: can't get SCQ.\n", card->name);
++ kfree(card->vcs[0]);
++ card->vcs[0] = NULL;
+ return -ENOMEM;
+ }
+
+diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
+index 8dd40d00a672a..6b829d347417a 100644
+--- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
++++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
+@@ -38,15 +38,17 @@ static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
+ if (!dpaa2_chan->fd_pool)
+ goto err;
+
+- dpaa2_chan->fl_pool = dma_pool_create("fl_pool", dev,
+- sizeof(struct dpaa2_fl_entry),
+- sizeof(struct dpaa2_fl_entry), 0);
++ dpaa2_chan->fl_pool =
++ dma_pool_create("fl_pool", dev,
++ sizeof(struct dpaa2_fl_entry) * 3,
++ sizeof(struct dpaa2_fl_entry), 0);
++
+ if (!dpaa2_chan->fl_pool)
+ goto err_fd;
+
+ dpaa2_chan->sdd_pool =
+ dma_pool_create("sdd_pool", dev,
+- sizeof(struct dpaa2_qdma_sd_d),
++ sizeof(struct dpaa2_qdma_sd_d) * 2,
+ sizeof(struct dpaa2_qdma_sd_d), 0);
+ if (!dpaa2_chan->sdd_pool)
+ goto err_fl;
+diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
+index 045ead46ec8fc..69385f32e2756 100644
+--- a/drivers/dma/fsl-qdma.c
++++ b/drivers/dma/fsl-qdma.c
+@@ -514,11 +514,11 @@ static struct fsl_qdma_queue
+ queue_temp = queue_head + i + (j * queue_num);
+
+ queue_temp->cq =
+- dma_alloc_coherent(&pdev->dev,
+- sizeof(struct fsl_qdma_format) *
+- queue_size[i],
+- &queue_temp->bus_addr,
+- GFP_KERNEL);
++ dmam_alloc_coherent(&pdev->dev,
++ sizeof(struct fsl_qdma_format) *
++ queue_size[i],
++ &queue_temp->bus_addr,
++ GFP_KERNEL);
+ if (!queue_temp->cq)
+ return NULL;
+ queue_temp->block_base = fsl_qdma->block_base +
+@@ -563,11 +563,11 @@ static struct fsl_qdma_queue
+ /*
+ * Buffer for queue command
+ */
+- status_head->cq = dma_alloc_coherent(&pdev->dev,
+- sizeof(struct fsl_qdma_format) *
+- status_size,
+- &status_head->bus_addr,
+- GFP_KERNEL);
++ status_head->cq = dmam_alloc_coherent(&pdev->dev,
++ sizeof(struct fsl_qdma_format) *
++ status_size,
++ &status_head->bus_addr,
++ GFP_KERNEL);
+ if (!status_head->cq) {
+ devm_kfree(&pdev->dev, status_head);
+ return NULL;
+@@ -1272,8 +1272,6 @@ static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
+
+ static int fsl_qdma_remove(struct platform_device *pdev)
+ {
+- int i;
+- struct fsl_qdma_queue *status;
+ struct device_node *np = pdev->dev.of_node;
+ struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
+
+@@ -1282,11 +1280,6 @@ static int fsl_qdma_remove(struct platform_device *pdev)
+ of_dma_controller_free(np);
+ dma_async_device_unregister(&fsl_qdma->dma_dev);
+
+- for (i = 0; i < fsl_qdma->block_number; i++) {
+- status = fsl_qdma->status[i];
+- dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) *
+- status->n_cq, status->cq, status->bus_addr);
+- }
+ return 0;
+ }
+
+diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
+index b86b809eb1f7e..82e7acfda6ed0 100644
+--- a/drivers/dma/ti/k3-udma.c
++++ b/drivers/dma/ti/k3-udma.c
+@@ -3963,6 +3963,7 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
+ {
+ struct udma_chan *uc = to_udma_chan(&vc->chan);
+ struct udma_desc *d;
++ u8 status;
+
+ if (!vd)
+ return;
+@@ -3972,12 +3973,12 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
+ if (d->metadata_size)
+ udma_fetch_epib(uc, d);
+
+- /* Provide residue information for the client */
+ if (result) {
+ void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
+
+ if (cppi5_desc_get_type(desc_vaddr) ==
+ CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
++ /* Provide residue information for the client */
+ result->residue = d->residue -
+ cppi5_hdesc_get_pktlen(desc_vaddr);
+ if (result->residue)
+@@ -3986,7 +3987,12 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
+ result->result = DMA_TRANS_NOERROR;
+ } else {
+ result->residue = 0;
+- result->result = DMA_TRANS_NOERROR;
++ /* Propagate TR Response errors to the client */
++ status = d->hwdesc[0].tr_resp_base->status;
++ if (status)
++ result->result = DMA_TRANS_ABORTED;
++ else
++ result->result = DMA_TRANS_NOERROR;
+ }
+ }
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+index f04595b750abc..5ec3f50a72acd 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+@@ -1014,7 +1014,7 @@ static struct stream_encoder *dcn301_stream_encoder_create(enum engine_id eng_id
+ vpg = dcn301_vpg_create(ctx, vpg_inst);
+ afmt = dcn301_afmt_create(ctx, afmt_inst);
+
+- if (!enc1 || !vpg || !afmt) {
++ if (!enc1 || !vpg || !afmt || eng_id >= ARRAY_SIZE(stream_enc_regs)) {
+ kfree(enc1);
+ kfree(vpg);
+ kfree(afmt);
+diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
+index daac2050d77d0..6f531bb61f7e5 100644
+--- a/drivers/gpu/drm/i915/gvt/handlers.c
++++ b/drivers/gpu/drm/i915/gvt/handlers.c
+@@ -2844,8 +2844,7 @@ static int handle_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset,
+ for (i = start; i < end; i += 4) {
+ p = intel_gvt_find_mmio_info(gvt, i);
+ if (p) {
+- WARN(1, "dup mmio definition offset %x\n",
+- info->offset);
++ WARN(1, "dup mmio definition offset %x\n", i);
+
+ /* We return -EEXIST here to make GVT-g load fail.
+ * So duplicated MMIO can be found as soon as
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+index 38d38f923df64..25245ef386db6 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+@@ -2053,7 +2053,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
+ }
+
+ /* reset the merge 3D HW block */
+- if (phys_enc->hw_pp->merge_3d) {
++ if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) {
+ phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
+ BLEND_3D_NONE);
+ if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d)
+@@ -2069,7 +2069,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
+ if (phys_enc->hw_wb)
+ intf_cfg.wb = phys_enc->hw_wb->idx;
+
+- if (phys_enc->hw_pp->merge_3d)
++ if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d)
+ intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
+
+ if (ctl->ops.reset_intf_cfg)
+diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
+index 103eef9f059a0..b20701893e5b3 100644
+--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
++++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
+@@ -133,11 +133,6 @@ static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
+ tbd = dp_link_get_test_bits_depth(ctrl->link,
+ ctrl->panel->dp_mode.bpp);
+
+- if (tbd == DP_TEST_BIT_DEPTH_UNKNOWN) {
+- pr_debug("BIT_DEPTH not set. Configure default\n");
+- tbd = DP_TEST_BIT_DEPTH_8;
+- }
+-
+ config |= tbd << DP_CONFIGURATION_CTRL_BPC_SHIFT;
+
+ /* Num of Lanes */
+diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
+index cb66d1126ea96..ceb382fa56d5b 100644
+--- a/drivers/gpu/drm/msm/dp/dp_link.c
++++ b/drivers/gpu/drm/msm/dp/dp_link.c
+@@ -7,6 +7,7 @@
+
+ #include <drm/drm_print.h>
+
++#include "dp_reg.h"
+ #include "dp_link.h"
+ #include "dp_panel.h"
+
+@@ -1075,7 +1076,7 @@ int dp_link_process_request(struct dp_link *dp_link)
+
+ int dp_link_get_colorimetry_config(struct dp_link *dp_link)
+ {
+- u32 cc;
++ u32 cc = DP_MISC0_COLORIMERY_CFG_LEGACY_RGB;
+ struct dp_link_private *link;
+
+ if (!dp_link) {
+@@ -1089,10 +1090,11 @@ int dp_link_get_colorimetry_config(struct dp_link *dp_link)
+ * Unless a video pattern CTS test is ongoing, use RGB_VESA
+ * Only RGB_VESA and RGB_CEA supported for now
+ */
+- if (dp_link_is_video_pattern_requested(link))
+- cc = link->dp_link.test_video.test_dyn_range;
+- else
+- cc = DP_TEST_DYNAMIC_RANGE_VESA;
++ if (dp_link_is_video_pattern_requested(link)) {
++ if (link->dp_link.test_video.test_dyn_range &
++ DP_TEST_DYNAMIC_RANGE_CEA)
++ cc = DP_MISC0_COLORIMERY_CFG_CEA_RGB;
++ }
+
+ return cc;
+ }
+@@ -1172,6 +1174,9 @@ void dp_link_reset_phy_params_vx_px(struct dp_link *dp_link)
+ u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp)
+ {
+ u32 tbd;
++ struct dp_link_private *link;
++
++ link = container_of(dp_link, struct dp_link_private, dp_link);
+
+ /*
+ * Few simplistic rules and assumptions made here:
+@@ -1189,12 +1194,13 @@ u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp)
+ tbd = DP_TEST_BIT_DEPTH_10;
+ break;
+ default:
+- tbd = DP_TEST_BIT_DEPTH_UNKNOWN;
++ drm_dbg_dp(link->drm_dev, "bpp=%d not supported, use bpc=8\n",
++ bpp);
++ tbd = DP_TEST_BIT_DEPTH_8;
+ break;
+ }
+
+- if (tbd != DP_TEST_BIT_DEPTH_UNKNOWN)
+- tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT);
++ tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT);
+
+ return tbd;
+ }
+diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h
+index 268602803d9a3..176a503ece9c0 100644
+--- a/drivers/gpu/drm/msm/dp/dp_reg.h
++++ b/drivers/gpu/drm/msm/dp/dp_reg.h
+@@ -129,6 +129,9 @@
+ #define DP_MISC0_COLORIMETRY_CFG_SHIFT (0x00000001)
+ #define DP_MISC0_TEST_BITS_DEPTH_SHIFT (0x00000005)
+
++#define DP_MISC0_COLORIMERY_CFG_LEGACY_RGB (0)
++#define DP_MISC0_COLORIMERY_CFG_CEA_RGB (0x04)
++
+ #define REG_DP_VALID_BOUNDARY (0x00000030)
+ #define REG_DP_VALID_BOUNDARY_2 (0x00000034)
+
+diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
+index d11f674e3dc37..51f321bcd778a 100644
+--- a/drivers/hwmon/aspeed-pwm-tacho.c
++++ b/drivers/hwmon/aspeed-pwm-tacho.c
+@@ -194,6 +194,8 @@ struct aspeed_pwm_tacho_data {
+ u8 fan_tach_ch_source[16];
+ struct aspeed_cooling_device *cdev[8];
+ const struct attribute_group *groups[3];
++ /* protects access to shared ASPEED_PTCR_RESULT */
++ struct mutex tach_lock;
+ };
+
+ enum type { TYPEM, TYPEN, TYPEO };
+@@ -528,6 +530,8 @@ static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
+ u8 fan_tach_ch_source, type, mode, both;
+ int ret;
+
++ mutex_lock(&priv->tach_lock);
++
+ regmap_write(priv->regmap, ASPEED_PTCR_TRIGGER, 0);
+ regmap_write(priv->regmap, ASPEED_PTCR_TRIGGER, 0x1 << fan_tach_ch);
+
+@@ -545,6 +549,8 @@ static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
+ ASPEED_RPM_STATUS_SLEEP_USEC,
+ usec);
+
++ mutex_unlock(&priv->tach_lock);
++
+ /* return -ETIMEDOUT if we didn't get an answer. */
+ if (ret)
+ return ret;
+@@ -904,6 +910,7 @@ static int aspeed_pwm_tacho_probe(struct platform_device *pdev)
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
++ mutex_init(&priv->tach_lock);
+ priv->regmap = devm_regmap_init(dev, NULL, (__force void *)regs,
+ &aspeed_pwm_tacho_regmap_config);
+ if (IS_ERR(priv->regmap))
+diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
+index 09aab5859fa75..59344ad62822d 100644
+--- a/drivers/hwmon/coretemp.c
++++ b/drivers/hwmon/coretemp.c
+@@ -380,7 +380,7 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
+ }
+
+ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
+- int attr_no)
++ int index)
+ {
+ int i;
+ static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,
+@@ -392,13 +392,20 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
+ };
+
+ for (i = 0; i < tdata->attr_size; i++) {
++ /*
++ * We map the attr number to core id of the CPU
++ * The attr number is always core id + 2
++ * The Pkgtemp will always show up as temp1_*, if available
++ */
++ int attr_no = tdata->is_pkg_data ? 1 : tdata->cpu_core_id + 2;
++
+ snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH,
+ "temp%d_%s", attr_no, suffixes[i]);
+ sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
+ tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
+ tdata->sd_attrs[i].dev_attr.attr.mode = 0444;
+ tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
+- tdata->sd_attrs[i].index = attr_no;
++ tdata->sd_attrs[i].index = index;
+ tdata->attrs[i] = &tdata->sd_attrs[i].dev_attr.attr;
+ }
+ tdata->attr_group.attrs = tdata->attrs;
+@@ -456,27 +463,22 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
+ struct platform_data *pdata = platform_get_drvdata(pdev);
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+ u32 eax, edx;
+- int err, index, attr_no;
++ int err, index;
+
+ /*
+- * Find attr number for sysfs:
+- * We map the attr number to core id of the CPU
+- * The attr number is always core id + 2
+- * The Pkgtemp will always show up as temp1_*, if available
++ * Get the index of tdata in pdata->core_data[]
++ * tdata for package: pdata->core_data[1]
++ * tdata for core: pdata->core_data[2] .. pdata->core_data[NUM_REAL_CORES + 1]
+ */
+ if (pkg_flag) {
+- attr_no = PKG_SYSFS_ATTR_NO;
++ index = PKG_SYSFS_ATTR_NO;
+ } else {
+- index = ida_alloc(&pdata->ida, GFP_KERNEL);
++ index = ida_alloc_max(&pdata->ida, NUM_REAL_CORES - 1, GFP_KERNEL);
+ if (index < 0)
+ return index;
+- pdata->cpu_map[index] = topology_core_id(cpu);
+- attr_no = index + BASE_SYSFS_ATTR_NO;
+- }
+
+- if (attr_no > MAX_CORE_DATA - 1) {
+- err = -ERANGE;
+- goto ida_free;
++ pdata->cpu_map[index] = topology_core_id(cpu);
++ index += BASE_SYSFS_ATTR_NO;
+ }
+
+ tdata = init_temp_data(cpu, pkg_flag);
+@@ -508,20 +510,20 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
+ }
+ }
+
+- pdata->core_data[attr_no] = tdata;
++ pdata->core_data[index] = tdata;
+
+ /* Create sysfs interfaces */
+- err = create_core_attrs(tdata, pdata->hwmon_dev, attr_no);
++ err = create_core_attrs(tdata, pdata->hwmon_dev, index);
+ if (err)
+ goto exit_free;
+
+ return 0;
+ exit_free:
+- pdata->core_data[attr_no] = NULL;
++ pdata->core_data[index] = NULL;
+ kfree(tdata);
+ ida_free:
+ if (!pkg_flag)
+- ida_free(&pdata->ida, index);
++ ida_free(&pdata->ida, index - BASE_SYSFS_ATTR_NO);
+ return err;
+ }
+
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
+index 447e1bcc82a32..4859b99d54fc2 100644
+--- a/drivers/infiniband/hw/irdma/verbs.c
++++ b/drivers/infiniband/hw/irdma/verbs.c
+@@ -2825,7 +2825,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
+ iwmr->ibmr.pd = pd;
+ iwmr->ibmr.device = pd->device;
+ iwmr->ibmr.iova = virt;
+- iwmr->page_size = PAGE_SIZE;
++ iwmr->page_size = SZ_4K;
+
+ if (req.reg_type == IRDMA_MEMREG_TYPE_MEM) {
+ iwmr->page_size = ib_umem_find_best_pgsz(region,
+diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
+index c4d8caadec59e..661d6c8b059bf 100644
+--- a/drivers/input/keyboard/atkbd.c
++++ b/drivers/input/keyboard/atkbd.c
+@@ -792,7 +792,6 @@ static int atkbd_probe(struct atkbd *atkbd)
+ {
+ struct ps2dev *ps2dev = &atkbd->ps2dev;
+ unsigned char param[2];
+- bool skip_getid;
+
+ /*
+ * Some systems, where the bit-twiddling when testing the io-lines of the
+@@ -806,6 +805,11 @@ static int atkbd_probe(struct atkbd *atkbd)
+ "keyboard reset failed on %s\n",
+ ps2dev->serio->phys);
+
++ if (atkbd_skip_getid(atkbd)) {
++ atkbd->id = 0xab83;
++ return 0;
++ }
++
+ /*
+ * Then we check the keyboard ID. We should get 0xab83 under normal conditions.
+ * Some keyboards report different values, but the first byte is always 0xab or
+@@ -814,18 +818,17 @@ static int atkbd_probe(struct atkbd *atkbd)
+ */
+
+ param[0] = param[1] = 0xa5; /* initialize with invalid values */
+- skip_getid = atkbd_skip_getid(atkbd);
+- if (skip_getid || ps2_command(ps2dev, param, ATKBD_CMD_GETID)) {
++ if (ps2_command(ps2dev, param, ATKBD_CMD_GETID)) {
+
+ /*
+- * If the get ID command was skipped or failed, we check if we can at least set
++ * If the get ID command failed, we check if we can at least set
+ * the LEDs on the keyboard. This should work on every keyboard out there.
+ * It also turns the LEDs off, which we want anyway.
+ */
+ param[0] = 0;
+ if (ps2_command(ps2dev, param, ATKBD_CMD_SETLEDS))
+ return -1;
+- atkbd->id = skip_getid ? 0xab83 : 0xabba;
++ atkbd->id = 0xabba;
+ return 0;
+ }
+
+diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
+index b585b1dab870e..cd45a65e17f2c 100644
+--- a/drivers/input/serio/i8042-acpipnpio.h
++++ b/drivers/input/serio/i8042-acpipnpio.h
+@@ -1208,6 +1208,12 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP |
+ SERIO_QUIRK_NOPNP)
+ },
++ {
++ .matches = {
++ DMI_MATCH(DMI_BOARD_NAME, "NS5x_7xPU"),
++ },
++ .driver_data = (void *)(SERIO_QUIRK_NOAUX)
++ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "NJ50_70CU"),
+diff --git a/drivers/mtd/parsers/ofpart_core.c b/drivers/mtd/parsers/ofpart_core.c
+index 192190c42fc84..e7b8e9d0a9103 100644
+--- a/drivers/mtd/parsers/ofpart_core.c
++++ b/drivers/mtd/parsers/ofpart_core.c
+@@ -122,6 +122,25 @@ static int parse_fixed_partitions(struct mtd_info *master,
+
+ a_cells = of_n_addr_cells(pp);
+ s_cells = of_n_size_cells(pp);
++ if (!dedicated && s_cells == 0) {
++ /*
++ * This is a ugly workaround to not create
++ * regression on devices that are still creating
++ * partitions as direct children of the nand controller.
++ * This can happen in case the nand controller node has
++ * #size-cells equal to 0 and the firmware (e.g.
++ * U-Boot) just add the partitions there assuming
++ * 32-bit addressing.
++ *
++ * If you get this warning your firmware and/or DTS
++ * should be really fixed.
++ *
++ * This is working only for devices smaller than 4GiB.
++ */
++ pr_warn("%s: ofpart partition %pOF (%pOF) #size-cells is wrongly set to <0>, assuming <1> for parsing partitions.\n",
++ master->name, pp, mtd_node);
++ s_cells = 1;
++ }
+ if (len / 4 != a_cells + s_cells) {
+ pr_debug("%s: ofpart partition %pOF (%pOF) error parsing reg property.\n",
+ master->name, pp,
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
+index abd4832e4ed21..5acb3e16b5677 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
+@@ -993,7 +993,7 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
+ return 0;
+
+ err_exit_hwts_rx:
+- aq_ring_free(&aq_ptp->hwts_rx);
++ aq_ring_hwts_rx_free(&aq_ptp->hwts_rx);
+ err_exit_ptp_rx:
+ aq_ring_free(&aq_ptp->ptp_rx);
+ err_exit_ptp_tx:
+@@ -1011,7 +1011,7 @@ void aq_ptp_ring_free(struct aq_nic_s *aq_nic)
+
+ aq_ring_free(&aq_ptp->ptp_tx);
+ aq_ring_free(&aq_ptp->ptp_rx);
+- aq_ring_free(&aq_ptp->hwts_rx);
++ aq_ring_hwts_rx_free(&aq_ptp->hwts_rx);
+
+ aq_ptp_skb_ring_release(&aq_ptp->skb_ring);
+ }
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+index 9c314fe14ab62..0eaaba3a18ee0 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+@@ -919,6 +919,19 @@ void aq_ring_free(struct aq_ring_s *self)
+ }
+ }
+
++void aq_ring_hwts_rx_free(struct aq_ring_s *self)
++{
++ if (!self)
++ return;
++
++ if (self->dx_ring) {
++ dma_free_coherent(aq_nic_get_dev(self->aq_nic),
++ self->size * self->dx_size + AQ_CFG_RXDS_DEF,
++ self->dx_ring, self->dx_ring_pa);
++ self->dx_ring = NULL;
++ }
++}
++
+ unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
+ {
+ unsigned int count;
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+index 52847310740a2..d627ace850ff5 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+@@ -210,6 +210,7 @@ int aq_ring_rx_fill(struct aq_ring_s *self);
+ int aq_ring_hwts_rx_alloc(struct aq_ring_s *self,
+ struct aq_nic_s *aq_nic, unsigned int idx,
+ unsigned int size, unsigned int dx_size);
++void aq_ring_hwts_rx_free(struct aq_ring_s *self);
+ void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic);
+
+ unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+index 0f896f606c3e6..c00d6d67db518 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+@@ -930,8 +930,11 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
+ if (pfvf->ptp && qidx < pfvf->hw.tx_queues) {
+ err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt,
+ sizeof(*sq->timestamps));
+- if (err)
++ if (err) {
++ kfree(sq->sg);
++ sq->sg = NULL;
+ return err;
++ }
+ }
+
+ sq->head = 0;
+@@ -947,7 +950,14 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
+ sq->stats.bytes = 0;
+ sq->stats.pkts = 0;
+
+- return pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura);
++ err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura);
++ if (err) {
++ kfree(sq->sg);
++ sq->sg = NULL;
++ return err;
++ }
++
++ return 0;
+
+ }
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
+index 54bb072aeb2d3..c11d626856247 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/common.h
++++ b/drivers/net/ethernet/stmicro/stmmac/common.h
+@@ -209,6 +209,7 @@ struct stmmac_safety_stats {
+ unsigned long mac_errors[32];
+ unsigned long mtl_errors[32];
+ unsigned long dma_errors[32];
++ unsigned long dma_dpp_errors[32];
+ };
+
+ /* Number of fields in Safety Stats */
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+index 880a75bf2eb1f..8748c37e9dac9 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+@@ -282,6 +282,8 @@
+ #define XGMAC_RXCEIE BIT(4)
+ #define XGMAC_TXCEIE BIT(0)
+ #define XGMAC_MTL_ECC_INT_STATUS 0x000010cc
++#define XGMAC_MTL_DPP_CONTROL 0x000010e0
++#define XGMAC_DPP_DISABLE BIT(0)
+ #define XGMAC_MTL_TXQ_OPMODE(x) (0x00001100 + (0x80 * (x)))
+ #define XGMAC_TQS GENMASK(25, 16)
+ #define XGMAC_TQS_SHIFT 16
+@@ -364,6 +366,7 @@
+ #define XGMAC_DCEIE BIT(1)
+ #define XGMAC_TCEIE BIT(0)
+ #define XGMAC_DMA_ECC_INT_STATUS 0x0000306c
++#define XGMAC_DMA_DPP_INT_STATUS 0x00003074
+ #define XGMAC_DMA_CH_CONTROL(x) (0x00003100 + (0x80 * (x)))
+ #define XGMAC_SPH BIT(24)
+ #define XGMAC_PBLx8 BIT(16)
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+index c2181c277291b..ec1616ffbfa7a 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+@@ -789,6 +789,44 @@ static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= {
+ { false, "UNKNOWN", "Unknown Error" }, /* 31 */
+ };
+
++#define DPP_RX_ERR "Read Rx Descriptor Parity checker Error"
++#define DPP_TX_ERR "Read Tx Descriptor Parity checker Error"
++
++static const struct dwxgmac3_error_desc dwxgmac3_dma_dpp_errors[32] = {
++ { true, "TDPES0", DPP_TX_ERR },
++ { true, "TDPES1", DPP_TX_ERR },
++ { true, "TDPES2", DPP_TX_ERR },
++ { true, "TDPES3", DPP_TX_ERR },
++ { true, "TDPES4", DPP_TX_ERR },
++ { true, "TDPES5", DPP_TX_ERR },
++ { true, "TDPES6", DPP_TX_ERR },
++ { true, "TDPES7", DPP_TX_ERR },
++ { true, "TDPES8", DPP_TX_ERR },
++ { true, "TDPES9", DPP_TX_ERR },
++ { true, "TDPES10", DPP_TX_ERR },
++ { true, "TDPES11", DPP_TX_ERR },
++ { true, "TDPES12", DPP_TX_ERR },
++ { true, "TDPES13", DPP_TX_ERR },
++ { true, "TDPES14", DPP_TX_ERR },
++ { true, "TDPES15", DPP_TX_ERR },
++ { true, "RDPES0", DPP_RX_ERR },
++ { true, "RDPES1", DPP_RX_ERR },
++ { true, "RDPES2", DPP_RX_ERR },
++ { true, "RDPES3", DPP_RX_ERR },
++ { true, "RDPES4", DPP_RX_ERR },
++ { true, "RDPES5", DPP_RX_ERR },
++ { true, "RDPES6", DPP_RX_ERR },
++ { true, "RDPES7", DPP_RX_ERR },
++ { true, "RDPES8", DPP_RX_ERR },
++ { true, "RDPES9", DPP_RX_ERR },
++ { true, "RDPES10", DPP_RX_ERR },
++ { true, "RDPES11", DPP_RX_ERR },
++ { true, "RDPES12", DPP_RX_ERR },
++ { true, "RDPES13", DPP_RX_ERR },
++ { true, "RDPES14", DPP_RX_ERR },
++ { true, "RDPES15", DPP_RX_ERR },
++};
++
+ static void dwxgmac3_handle_dma_err(struct net_device *ndev,
+ void __iomem *ioaddr, bool correctable,
+ struct stmmac_safety_stats *stats)
+@@ -800,6 +838,13 @@ static void dwxgmac3_handle_dma_err(struct net_device *ndev,
+
+ dwxgmac3_log_error(ndev, value, correctable, "DMA",
+ dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);
++
++ value = readl(ioaddr + XGMAC_DMA_DPP_INT_STATUS);
++ writel(value, ioaddr + XGMAC_DMA_DPP_INT_STATUS);
++
++ dwxgmac3_log_error(ndev, value, false, "DMA_DPP",
++ dwxgmac3_dma_dpp_errors,
++ STAT_OFF(dma_dpp_errors), stats);
+ }
+
+ static int
+@@ -838,6 +883,12 @@ dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
+ value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */
+ writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);
+
++ /* 5. Enable Data Path Parity Protection */
++ value = readl(ioaddr + XGMAC_MTL_DPP_CONTROL);
++ /* already enabled by default, explicit enable it again */
++ value &= ~XGMAC_DPP_DISABLE;
++ writel(value, ioaddr + XGMAC_MTL_DPP_CONTROL);
++
+ return 0;
+ }
+
+@@ -871,7 +922,11 @@ static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
+ ret |= !corr;
+ }
+
+- err = dma & (XGMAC_DEUIS | XGMAC_DECIS);
++ /* DMA_DPP_Interrupt_Status is indicated by MCSIS bit in
++ * DMA_Safety_Interrupt_Status, so we handle DMA Data Path
++ * Parity Errors here
++ */
++ err = dma & (XGMAC_DEUIS | XGMAC_DECIS | XGMAC_MCSIS);
+ corr = dma & XGMAC_DECIS;
+ if (err) {
+ dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
+@@ -887,6 +942,7 @@ static const struct dwxgmac3_error {
+ { dwxgmac3_mac_errors },
+ { dwxgmac3_mtl_errors },
+ { dwxgmac3_dma_errors },
++ { dwxgmac3_dma_dpp_errors },
+ };
+
+ static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
+diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
+index c3fbdd6b68baf..f3fa4bd121169 100644
+--- a/drivers/net/netdevsim/dev.c
++++ b/drivers/net/netdevsim/dev.c
+@@ -835,14 +835,14 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
+ trap_report_dw.work);
+ nsim_dev = nsim_trap_data->nsim_dev;
+
+- /* For each running port and enabled packet trap, generate a UDP
+- * packet with a random 5-tuple and report it.
+- */
+ if (!devl_trylock(priv_to_devlink(nsim_dev))) {
+- schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, 0);
++ schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, 1);
+ return;
+ }
+
++ /* For each running port and enabled packet trap, generate a UDP
++ * packet with a random 5-tuple and report it.
++ */
+ list_for_each_entry(nsim_dev_port, &nsim_dev->port_list, list) {
+ if (!netif_running(nsim_dev_port->ns->netdev))
+ continue;
+diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
+index 15a179631903f..abc65c4d7a303 100644
+--- a/drivers/net/ppp/ppp_async.c
++++ b/drivers/net/ppp/ppp_async.c
+@@ -469,6 +469,10 @@ ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
+ case PPPIOCSMRU:
+ if (get_user(val, p))
+ break;
++ if (val > U16_MAX) {
++ err = -EINVAL;
++ break;
++ }
+ if (val < PPP_MRU)
+ val = PPP_MRU;
+ ap->mru = val;
+diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+index 9de617ca9daa2..7e61c6b278a74 100644
+--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
++++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+@@ -675,8 +675,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
+ channel->irq = platform_get_irq_optional(pdev, 0);
+ channel->dr_mode = rcar_gen3_get_dr_mode(dev->of_node);
+ if (channel->dr_mode != USB_DR_MODE_UNKNOWN) {
+- int ret;
+-
+ channel->is_otg_channel = true;
+ channel->uses_otg_pins = !of_property_read_bool(dev->of_node,
+ "renesas,no-otg-pins");
+@@ -740,8 +738,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
+ ret = PTR_ERR(provider);
+ goto error;
+ } else if (channel->is_otg_channel) {
+- int ret;
+-
+ ret = device_create_file(dev, &dev_attr_role);
+ if (ret < 0)
+ goto error;
+diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c
+index 31a775877f6e3..63c45809943ff 100644
+--- a/drivers/phy/ti/phy-omap-usb2.c
++++ b/drivers/phy/ti/phy-omap-usb2.c
+@@ -116,7 +116,7 @@ static int omap_usb_set_vbus(struct usb_otg *otg, bool enabled)
+ {
+ struct omap_usb *phy = phy_to_omapusb(otg->usb_phy);
+
+- if (!phy->comparator)
++ if (!phy->comparator || !phy->comparator->set_vbus)
+ return -ENODEV;
+
+ return phy->comparator->set_vbus(phy->comparator, enabled);
+@@ -126,7 +126,7 @@ static int omap_usb_start_srp(struct usb_otg *otg)
+ {
+ struct omap_usb *phy = phy_to_omapusb(otg->usb_phy);
+
+- if (!phy->comparator)
++ if (!phy->comparator || !phy->comparator->start_srp)
+ return -ENODEV;
+
+ return phy->comparator->start_srp(phy->comparator);
+diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
+index 66290961c47c2..cfa6f0edff17c 100644
+--- a/drivers/scsi/scsi_error.c
++++ b/drivers/scsi/scsi_error.c
+@@ -277,11 +277,12 @@ static void scsi_eh_inc_host_failed(struct rcu_head *head)
+ {
+ struct scsi_cmnd *scmd = container_of(head, typeof(*scmd), rcu);
+ struct Scsi_Host *shost = scmd->device->host;
++ unsigned int busy = scsi_host_busy(shost);
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ shost->host_failed++;
+- scsi_eh_wakeup(shost, scsi_host_busy(shost));
++ scsi_eh_wakeup(shost, busy);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ }
+
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index 0e7e9f1e5a029..5c5954b78585e 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -280,9 +280,11 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
+ rcu_read_lock();
+ __clear_bit(SCMD_STATE_INFLIGHT, &cmd->state);
+ if (unlikely(scsi_host_in_recovery(shost))) {
++ unsigned int busy = scsi_host_busy(shost);
++
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (shost->host_failed || shost->host_eh_scheduled)
+- scsi_eh_wakeup(shost, scsi_host_busy(shost));
++ scsi_eh_wakeup(shost, busy);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ }
+ rcu_read_unlock();
+diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
+index f6f13e7f1ba14..f4d8e80c4c347 100644
+--- a/drivers/usb/dwc3/host.c
++++ b/drivers/usb/dwc3/host.c
+@@ -66,7 +66,7 @@ out:
+
+ int dwc3_host_init(struct dwc3 *dwc)
+ {
+- struct property_entry props[4];
++ struct property_entry props[5];
+ struct platform_device *xhci;
+ int ret, irq;
+ int prop_idx = 0;
+@@ -94,6 +94,8 @@ int dwc3_host_init(struct dwc3 *dwc)
+
+ memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
+
++ props[prop_idx++] = PROPERTY_ENTRY_BOOL("xhci-sg-trb-cache-size-quirk");
++
+ if (dwc->usb3_lpm_capable)
+ props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb3-lpm-capable");
+
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index c9438dc56f5fc..b387d39bfb81d 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -301,6 +301,9 @@ static int xhci_plat_probe(struct platform_device *pdev)
+ if (device_property_read_bool(tmpdev, "quirk-broken-port-ped"))
+ xhci->quirks |= XHCI_BROKEN_PORT_PED;
+
++ if (device_property_read_bool(tmpdev, "xhci-sg-trb-cache-size-quirk"))
++ xhci->quirks |= XHCI_SG_TRB_CACHE_SIZE_QUIRK;
++
+ device_property_read_u32(tmpdev, "imod-interval-ns",
+ &xhci->imod_interval);
+ }
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index f1d7a5a863aa4..b3e60b3847941 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -146,6 +146,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
+ { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
+ { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
++ { USB_DEVICE(0x10C4, 0x87ED) }, /* IMST USB-Stick for Smart Meter */
+ { USB_DEVICE(0x10C4, 0x8856) }, /* CEL EM357 ZigBee USB Stick - LR */
+ { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
+ { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 4adef92598709..c0a0cca65437f 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -2269,6 +2269,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) }, /* Fibocom FM160 (MBIM mode) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
++ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a3, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */
+ .driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index b1e844bf31f81..703a9c5635573 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -184,6 +184,8 @@ static const struct usb_device_id id_table[] = {
+ {DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */
+ {DEVICE_SWI(0x413c, 0x81d1)}, /* Dell Wireless 5818 */
+ {DEVICE_SWI(0x413c, 0x81d2)}, /* Dell Wireless 5818 */
++ {DEVICE_SWI(0x413c, 0x8217)}, /* Dell Wireless DW5826e */
++ {DEVICE_SWI(0x413c, 0x8218)}, /* Dell Wireless DW5826e QDL */
+
+ /* Huawei devices */
+ {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index 2eea080298812..61c72e62abd49 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -2588,12 +2588,11 @@ EXPORT_SYMBOL_GPL(vhost_disable_notify);
+ /* Create a new message. */
+ struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
+ {
+- struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);
++ /* Make sure all padding within the structure is initialized. */
++ struct vhost_msg_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return NULL;
+
+- /* Make sure all padding within the structure is initialized. */
+- memset(&node->msg, 0, sizeof node->msg);
+ node->vq = vq;
+ node->msg.type = type;
+ return node;
+diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
+index 72f34f96d0155..2c797eb519da9 100644
+--- a/fs/dlm/lowcomms.c
++++ b/fs/dlm/lowcomms.c
+@@ -174,7 +174,7 @@ static LIST_HEAD(dlm_node_addrs);
+ static DEFINE_SPINLOCK(dlm_node_addrs_spin);
+
+ static struct listen_connection listen_con;
+-static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
++static struct sockaddr_storage dlm_local_addr[DLM_MAX_ADDR_COUNT];
+ static int dlm_local_count;
+ int dlm_allow_conn;
+
+@@ -398,7 +398,7 @@ static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
+ if (!sa_out)
+ return 0;
+
+- if (dlm_local_addr[0]->ss_family == AF_INET) {
++ if (dlm_local_addr[0].ss_family == AF_INET) {
+ struct sockaddr_in *in4 = (struct sockaddr_in *) &sas;
+ struct sockaddr_in *ret4 = (struct sockaddr_in *) sa_out;
+ ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
+@@ -727,7 +727,7 @@ static void add_sock(struct socket *sock, struct connection *con)
+ static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
+ int *addr_len)
+ {
+- saddr->ss_family = dlm_local_addr[0]->ss_family;
++ saddr->ss_family = dlm_local_addr[0].ss_family;
+ if (saddr->ss_family == AF_INET) {
+ struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
+ in4_addr->sin_port = cpu_to_be16(port);
+@@ -1167,7 +1167,7 @@ static int sctp_bind_addrs(struct socket *sock, uint16_t port)
+ int i, addr_len, result = 0;
+
+ for (i = 0; i < dlm_local_count; i++) {
+- memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
++ memcpy(&localaddr, &dlm_local_addr[i], sizeof(localaddr));
+ make_sockaddr(&localaddr, port, &addr_len);
+
+ if (!i)
+@@ -1187,7 +1187,7 @@ static int sctp_bind_addrs(struct socket *sock, uint16_t port)
+ /* Get local addresses */
+ static void init_local(void)
+ {
+- struct sockaddr_storage sas, *addr;
++ struct sockaddr_storage sas;
+ int i;
+
+ dlm_local_count = 0;
+@@ -1195,21 +1195,10 @@ static void init_local(void)
+ if (dlm_our_addr(&sas, i))
+ break;
+
+- addr = kmemdup(&sas, sizeof(*addr), GFP_NOFS);
+- if (!addr)
+- break;
+- dlm_local_addr[dlm_local_count++] = addr;
++ memcpy(&dlm_local_addr[dlm_local_count++], &sas, sizeof(sas));
+ }
+ }
+
+-static void deinit_local(void)
+-{
+- int i;
+-
+- for (i = 0; i < dlm_local_count; i++)
+- kfree(dlm_local_addr[i]);
+-}
+-
+ static struct writequeue_entry *new_writequeue_entry(struct connection *con)
+ {
+ struct writequeue_entry *entry;
+@@ -1575,7 +1564,7 @@ static void dlm_connect(struct connection *con)
+ }
+
+ /* Create a socket to communicate with */
+- result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
++ result = sock_create_kern(&init_net, dlm_local_addr[0].ss_family,
+ SOCK_STREAM, dlm_proto_ops->proto, &sock);
+ if (result < 0)
+ goto socket_err;
+@@ -1786,7 +1775,6 @@ void dlm_lowcomms_stop(void)
+ foreach_conn(free_conn);
+ srcu_read_unlock(&connections_srcu, idx);
+ work_stop();
+- deinit_local();
+
+ dlm_proto_ops = NULL;
+ }
+@@ -1803,7 +1791,7 @@ static int dlm_listen_for_all(void)
+ if (result < 0)
+ return result;
+
+- result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
++ result = sock_create_kern(&init_net, dlm_local_addr[0].ss_family,
+ SOCK_STREAM, dlm_proto_ops->proto, &sock);
+ if (result < 0) {
+ log_print("Can't create comms socket: %d", result);
+@@ -1842,7 +1830,7 @@ static int dlm_tcp_bind(struct socket *sock)
+ /* Bind to our cluster-known address connecting to avoid
+ * routing problems.
+ */
+- memcpy(&src_addr, dlm_local_addr[0], sizeof(src_addr));
++ memcpy(&src_addr, &dlm_local_addr[0], sizeof(src_addr));
+ make_sockaddr(&src_addr, 0, &addr_len);
+
+ result = kernel_bind(sock, (struct sockaddr *)&src_addr,
+@@ -1899,9 +1887,9 @@ static int dlm_tcp_listen_bind(struct socket *sock)
+ int addr_len;
+
+ /* Bind to our port */
+- make_sockaddr(dlm_local_addr[0], dlm_config.ci_tcp_port, &addr_len);
++ make_sockaddr(&dlm_local_addr[0], dlm_config.ci_tcp_port, &addr_len);
+ return kernel_bind(sock, (struct sockaddr *)&dlm_local_addr[0],
+- addr_len);
++ addr_len);
+ }
+
+ static const struct dlm_proto_ops dlm_tcp_ops = {
+@@ -1992,7 +1980,7 @@ int dlm_lowcomms_start(void)
+
+ error = work_start();
+ if (error)
+- goto fail_local;
++ goto fail;
+
+ dlm_allow_conn = 1;
+
+@@ -2022,8 +2010,6 @@ fail_listen:
+ fail_proto_ops:
+ dlm_allow_conn = 0;
+ work_stop();
+-fail_local:
+- deinit_local();
+ fail:
+ return error;
+ }
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index c1515daf1def1..40903c172a34f 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1118,6 +1118,24 @@ void ext4_mb_generate_buddy(struct super_block *sb,
+ atomic64_add(period, &sbi->s_mb_generation_time);
+ }
+
++static void mb_regenerate_buddy(struct ext4_buddy *e4b)
++{
++ int count;
++ int order = 1;
++ void *buddy;
++
++ while ((buddy = mb_find_buddy(e4b, order++, &count)))
++ mb_set_bits(buddy, 0, count);
++
++ e4b->bd_info->bb_fragments = 0;
++ memset(e4b->bd_info->bb_counters, 0,
++ sizeof(*e4b->bd_info->bb_counters) *
++ (e4b->bd_sb->s_blocksize_bits + 2));
++
++ ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy,
++ e4b->bd_bitmap, e4b->bd_group, e4b->bd_info);
++}
++
+ /* The buddy information is attached the buddy cache inode
+ * for convenience. The information regarding each group
+ * is loaded via ext4_mb_load_buddy. The information involve
+@@ -1796,6 +1814,8 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
+ ext4_mark_group_bitmap_corrupted(
+ sb, e4b->bd_group,
+ EXT4_GROUP_INFO_BBITMAP_CORRUPT);
++ } else {
++ mb_regenerate_buddy(e4b);
+ }
+ goto done;
+ }
+diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
+index 3d9f6495a4db4..967262c37da52 100644
+--- a/fs/f2fs/compress.c
++++ b/fs/f2fs/compress.c
+@@ -55,6 +55,7 @@ struct f2fs_compress_ops {
+ int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
+ void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
+ int (*decompress_pages)(struct decompress_io_ctx *dic);
++ bool (*is_level_valid)(int level);
+ };
+
+ static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
+@@ -322,11 +323,21 @@ static int lz4_decompress_pages(struct decompress_io_ctx *dic)
+ return 0;
+ }
+
++static bool lz4_is_level_valid(int lvl)
++{
++#ifdef CONFIG_F2FS_FS_LZ4HC
++ return !lvl || (lvl >= LZ4HC_MIN_CLEVEL && lvl <= LZ4HC_MAX_CLEVEL);
++#else
++ return lvl == 0;
++#endif
++}
++
+ static const struct f2fs_compress_ops f2fs_lz4_ops = {
+ .init_compress_ctx = lz4_init_compress_ctx,
+ .destroy_compress_ctx = lz4_destroy_compress_ctx,
+ .compress_pages = lz4_compress_pages,
+ .decompress_pages = lz4_decompress_pages,
++ .is_level_valid = lz4_is_level_valid,
+ };
+ #endif
+
+@@ -490,6 +501,11 @@ static int zstd_decompress_pages(struct decompress_io_ctx *dic)
+ return 0;
+ }
+
++static bool zstd_is_level_valid(int lvl)
++{
++ return lvl >= zstd_min_clevel() && lvl <= zstd_max_clevel();
++}
++
+ static const struct f2fs_compress_ops f2fs_zstd_ops = {
+ .init_compress_ctx = zstd_init_compress_ctx,
+ .destroy_compress_ctx = zstd_destroy_compress_ctx,
+@@ -497,6 +513,7 @@ static const struct f2fs_compress_ops f2fs_zstd_ops = {
+ .init_decompress_ctx = zstd_init_decompress_ctx,
+ .destroy_decompress_ctx = zstd_destroy_decompress_ctx,
+ .decompress_pages = zstd_decompress_pages,
++ .is_level_valid = zstd_is_level_valid,
+ };
+ #endif
+
+@@ -555,6 +572,16 @@ bool f2fs_is_compress_backend_ready(struct inode *inode)
+ return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
+ }
+
++bool f2fs_is_compress_level_valid(int alg, int lvl)
++{
++ const struct f2fs_compress_ops *cops = f2fs_cops[alg];
++
++ if (cops->is_level_valid)
++ return cops->is_level_valid(lvl);
++
++ return lvl == 0;
++}
++
+ static mempool_t *compress_page_pool;
+ static int num_compress_pages = 512;
+ module_param(num_compress_pages, uint, 0444);
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 5c76ba764b71f..e5a9498b89c06 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -4219,6 +4219,7 @@ bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
+ int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock);
+ void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
+ bool f2fs_is_compress_backend_ready(struct inode *inode);
++bool f2fs_is_compress_level_valid(int alg, int lvl);
+ int f2fs_init_compress_mempool(void);
+ void f2fs_destroy_compress_mempool(void);
+ void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task);
+@@ -4283,6 +4284,7 @@ static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
+ /* not support compression */
+ return false;
+ }
++static inline bool f2fs_is_compress_level_valid(int alg, int lvl) { return false; }
+ static inline struct page *f2fs_compress_control_page(struct page *page)
+ {
+ WARN_ON_ONCE(1);
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 3805162dcef2b..0c0d0671febea 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -628,7 +628,7 @@ static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
+ if (kstrtouint(str + 1, 10, &level))
+ return -EINVAL;
+
+- if (level < LZ4HC_MIN_CLEVEL || level > LZ4HC_MAX_CLEVEL) {
++ if (!f2fs_is_compress_level_valid(COMPRESS_LZ4, level)) {
+ f2fs_info(sbi, "invalid lz4hc compress level: %d", level);
+ return -EINVAL;
+ }
+@@ -666,7 +666,7 @@ static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
+ if (kstrtouint(str + 1, 10, &level))
+ return -EINVAL;
+
+- if (level < zstd_min_clevel() || level > zstd_max_clevel()) {
++ if (!f2fs_is_compress_level_valid(COMPRESS_ZSTD, level)) {
+ f2fs_info(sbi, "invalid zstd compress level: %d", level);
+ return -EINVAL;
+ }
+diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
+index 8c9abaf139e67..74482ef569ab7 100644
+--- a/fs/ntfs3/ntfs_fs.h
++++ b/fs/ntfs3/ntfs_fs.h
+@@ -467,7 +467,7 @@ bool al_delete_le(struct ntfs_inode *ni, enum ATTR_TYPE type, CLST vcn,
+ int al_update(struct ntfs_inode *ni, int sync);
+ static inline size_t al_aligned(size_t size)
+ {
+- return (size + 1023) & ~(size_t)1023;
++ return size_add(size, 1023) & ~(size_t)1023;
+ }
+
+ /* Globals from bitfunc.c */
+diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
+index 634035bcb9347..b8e14bcd2c68d 100644
+--- a/fs/smb/client/sess.c
++++ b/fs/smb/client/sess.c
+@@ -248,6 +248,8 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
+ &iface->sockaddr,
+ rc);
+ kref_put(&iface->refcount, release_iface);
++ /* failure to add chan should increase weight */
++ iface->weight_fulfilled++;
+ continue;
+ }
+
+diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
+index c923f4e60f240..3576c6e89fea4 100644
+--- a/include/linux/dmaengine.h
++++ b/include/linux/dmaengine.h
+@@ -954,7 +954,8 @@ static inline int dmaengine_slave_config(struct dma_chan *chan,
+
+ static inline bool is_slave_direction(enum dma_transfer_direction direction)
+ {
+- return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
++ return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM) ||
++ (direction == DMA_DEV_TO_DEV);
+ }
+
+ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
+diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
+index f2044d5a652b5..254d4a898179c 100644
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -197,6 +197,7 @@ enum hrtimer_base_type {
+ * @max_hang_time: Maximum time spent in hrtimer_interrupt
+ * @softirq_expiry_lock: Lock which is taken while softirq based hrtimer are
+ * expired
++ * @online: CPU is online from an hrtimers point of view
+ * @timer_waiters: A hrtimer_cancel() invocation waits for the timer
+ * callback to finish.
+ * @expires_next: absolute time of the next event, is required for remote
+@@ -219,7 +220,8 @@ struct hrtimer_cpu_base {
+ unsigned int hres_active : 1,
+ in_hrtirq : 1,
+ hang_detected : 1,
+- softirq_activated : 1;
++ softirq_activated : 1,
++ online : 1;
+ #ifdef CONFIG_HIGH_RES_TIMERS
+ unsigned int nr_events;
+ unsigned short nr_retries;
+diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
+index af8f4c304d272..707af820f1a97 100644
+--- a/include/uapi/linux/netfilter/nf_tables.h
++++ b/include/uapi/linux/netfilter/nf_tables.h
+@@ -266,9 +266,11 @@ enum nft_rule_attributes {
+ /**
+ * enum nft_rule_compat_flags - nf_tables rule compat flags
+ *
++ * @NFT_RULE_COMPAT_F_UNUSED: unused
+ * @NFT_RULE_COMPAT_F_INV: invert the check result
+ */
+ enum nft_rule_compat_flags {
++ NFT_RULE_COMPAT_F_UNUSED = (1 << 0),
+ NFT_RULE_COMPAT_F_INV = (1 << 1),
+ NFT_RULE_COMPAT_F_MASK = NFT_RULE_COMPAT_F_INV,
+ };
+diff --git a/io_uring/net.c b/io_uring/net.c
+index 67f09a40bcb21..618ab186fe036 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -875,6 +875,7 @@ retry_multishot:
+ if (!buf)
+ return -ENOBUFS;
+ sr->buf = buf;
++ sr->len = len;
+ }
+
+ ret = import_single_range(ITER_DEST, sr->buf, len, &iov, &msg.msg_iter);
+diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
+index 1c90e710d537f..cc6db3bce1b2f 100644
+--- a/kernel/time/clocksource.c
++++ b/kernel/time/clocksource.c
+@@ -126,6 +126,7 @@ static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
+ static DEFINE_SPINLOCK(watchdog_lock);
+ static int watchdog_running;
+ static atomic_t watchdog_reset_pending;
++static int64_t watchdog_max_interval;
+
+ static inline void clocksource_watchdog_lock(unsigned long *flags)
+ {
+@@ -144,6 +145,7 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating);
+ * Interval: 0.5sec.
+ */
+ #define WATCHDOG_INTERVAL (HZ >> 1)
++#define WATCHDOG_INTERVAL_MAX_NS ((2 * WATCHDOG_INTERVAL) * (NSEC_PER_SEC / HZ))
+
+ static void clocksource_watchdog_work(struct work_struct *work)
+ {
+@@ -396,8 +398,8 @@ static inline void clocksource_reset_watchdog(void)
+ static void clocksource_watchdog(struct timer_list *unused)
+ {
+ u64 csnow, wdnow, cslast, wdlast, delta;
++ int64_t wd_nsec, cs_nsec, interval;
+ int next_cpu, reset_pending;
+- int64_t wd_nsec, cs_nsec;
+ struct clocksource *cs;
+ enum wd_read_status read_ret;
+ unsigned long extra_wait = 0;
+@@ -467,6 +469,27 @@ static void clocksource_watchdog(struct timer_list *unused)
+ if (atomic_read(&watchdog_reset_pending))
+ continue;
+
++ /*
++ * The processing of timer softirqs can get delayed (usually
++ * on account of ksoftirqd not getting to run in a timely
++ * manner), which causes the watchdog interval to stretch.
++ * Skew detection may fail for longer watchdog intervals
++ * on account of fixed margins being used.
++ * Some clocksources, e.g. acpi_pm, cannot tolerate
++ * watchdog intervals longer than a few seconds.
++ */
++ interval = max(cs_nsec, wd_nsec);
++ if (unlikely(interval > WATCHDOG_INTERVAL_MAX_NS)) {
++ if (system_state > SYSTEM_SCHEDULING &&
++ interval > 2 * watchdog_max_interval) {
++ watchdog_max_interval = interval;
++ pr_warn("Long readout interval, skipping watchdog check: cs_nsec: %lld wd_nsec: %lld\n",
++ cs_nsec, wd_nsec);
++ }
++ watchdog_timer.expires = jiffies;
++ continue;
++ }
++
+ /* Check the deviation from the watchdog clocksource. */
+ md = cs->uncertainty_margin + watchdog->uncertainty_margin;
+ if (abs(cs_nsec - wd_nsec) > md) {
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 5561dabc9b225..8e0aff1d1ea4f 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -1082,6 +1082,7 @@ static int enqueue_hrtimer(struct hrtimer *timer,
+ enum hrtimer_mode mode)
+ {
+ debug_activate(timer, mode);
++ WARN_ON_ONCE(!base->cpu_base->online);
+
+ base->cpu_base->active_bases |= 1 << base->index;
+
+@@ -2180,6 +2181,7 @@ int hrtimers_prepare_cpu(unsigned int cpu)
+ cpu_base->softirq_next_timer = NULL;
+ cpu_base->expires_next = KTIME_MAX;
+ cpu_base->softirq_expires_next = KTIME_MAX;
++ cpu_base->online = 1;
+ hrtimer_cpu_base_init_expiry_lock(cpu_base);
+ return 0;
+ }
+@@ -2247,6 +2249,7 @@ int hrtimers_cpu_dying(unsigned int dying_cpu)
+ smp_call_function_single(ncpu, retrigger_next_event, NULL, 0);
+
+ raw_spin_unlock(&new_base->lock);
++ old_base->online = 0;
+ raw_spin_unlock(&old_base->lock);
+
+ return 0;
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 2f646335d2183..9408dc3bb42d3 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -1637,10 +1637,12 @@ EXPORT_SYMBOL(inet_current_timestamp);
+
+ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+ {
+- if (sk->sk_family == AF_INET)
++ unsigned int family = READ_ONCE(sk->sk_family);
++
++ if (family == AF_INET)
+ return ip_recv_error(sk, msg, len, addr_len);
+ #if IS_ENABLED(CONFIG_IPV6)
+- if (sk->sk_family == AF_INET6)
++ if (family == AF_INET6)
+ return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
+ #endif
+ return -EINVAL;
+diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
+index 586b1b3e35b80..80ccd6661aa32 100644
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -332,7 +332,7 @@ static int iptunnel_pmtud_build_icmpv6(struct sk_buff *skb, int mtu)
+ };
+ skb_reset_network_header(skb);
+
+- csum = csum_partial(icmp6h, len, 0);
++ csum = skb_checksum(skb, skb_transport_offset(skb), len, 0);
+ icmp6h->icmp6_cksum = csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr, len,
+ IPPROTO_ICMPV6, csum);
+
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index c07645c999f9a..c6f0da028a2a4 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -7221,8 +7221,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
+
+ rcu_read_lock();
+ beacon_ies = rcu_dereference(req->bss->beacon_ies);
+-
+- if (beacon_ies) {
++ if (!beacon_ies) {
+ /*
+ * Wait up to one beacon interval ...
+ * should this be more if we miss one?
+diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
+index 6952da7dfc02c..e1623fbf36548 100644
+--- a/net/netfilter/nft_compat.c
++++ b/net/netfilter/nft_compat.c
+@@ -135,7 +135,7 @@ static void nft_target_eval_bridge(const struct nft_expr *expr,
+
+ static const struct nla_policy nft_target_policy[NFTA_TARGET_MAX + 1] = {
+ [NFTA_TARGET_NAME] = { .type = NLA_NUL_STRING },
+- [NFTA_TARGET_REV] = { .type = NLA_U32 },
++ [NFTA_TARGET_REV] = NLA_POLICY_MAX(NLA_BE32, 255),
+ [NFTA_TARGET_INFO] = { .type = NLA_BINARY },
+ };
+
+@@ -200,6 +200,7 @@ static const struct nla_policy nft_rule_compat_policy[NFTA_RULE_COMPAT_MAX + 1]
+ static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv)
+ {
+ struct nlattr *tb[NFTA_RULE_COMPAT_MAX+1];
++ u32 l4proto;
+ u32 flags;
+ int err;
+
+@@ -212,12 +213,18 @@ static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv)
+ return -EINVAL;
+
+ flags = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_FLAGS]));
+- if (flags & ~NFT_RULE_COMPAT_F_MASK)
++ if (flags & NFT_RULE_COMPAT_F_UNUSED ||
++ flags & ~NFT_RULE_COMPAT_F_MASK)
+ return -EINVAL;
+ if (flags & NFT_RULE_COMPAT_F_INV)
+ *inv = true;
+
+- *proto = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_PROTO]));
++ l4proto = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_PROTO]));
++ if (l4proto > U16_MAX)
++ return -EINVAL;
++
++ *proto = l4proto;
++
+ return 0;
+ }
+
+@@ -418,7 +425,7 @@ static void nft_match_eval(const struct nft_expr *expr,
+
+ static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = {
+ [NFTA_MATCH_NAME] = { .type = NLA_NUL_STRING },
+- [NFTA_MATCH_REV] = { .type = NLA_U32 },
++ [NFTA_MATCH_REV] = NLA_POLICY_MAX(NLA_BE32, 255),
+ [NFTA_MATCH_INFO] = { .type = NLA_BINARY },
+ };
+
+@@ -721,7 +728,7 @@ out_put:
+ static const struct nla_policy nfnl_compat_policy_get[NFTA_COMPAT_MAX+1] = {
+ [NFTA_COMPAT_NAME] = { .type = NLA_NUL_STRING,
+ .len = NFT_COMPAT_NAME_MAX-1 },
+- [NFTA_COMPAT_REV] = { .type = NLA_U32 },
++ [NFTA_COMPAT_REV] = NLA_POLICY_MAX(NLA_BE32, 255),
+ [NFTA_COMPAT_TYPE] = { .type = NLA_U32 },
+ };
+
+diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
+index 1101665f52537..8df7564f0611e 100644
+--- a/net/netfilter/nft_ct.c
++++ b/net/netfilter/nft_ct.c
+@@ -484,6 +484,9 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
+ break;
+ #endif
+ case NFT_CT_ID:
++ if (tb[NFTA_CT_DIRECTION])
++ return -EINVAL;
++
+ len = sizeof(u32);
+ break;
+ default:
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index 4e1cc31729b80..e1969209b3abb 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -342,9 +342,6 @@
+ #include "nft_set_pipapo_avx2.h"
+ #include "nft_set_pipapo.h"
+
+-/* Current working bitmap index, toggled between field matches */
+-static DEFINE_PER_CPU(bool, nft_pipapo_scratch_index);
+-
+ /**
+ * pipapo_refill() - For each set bit, set bits from selected mapping table item
+ * @map: Bitmap to be scanned for set bits
+@@ -412,6 +409,7 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext)
+ {
+ struct nft_pipapo *priv = nft_set_priv(set);
++ struct nft_pipapo_scratch *scratch;
+ unsigned long *res_map, *fill_map;
+ u8 genmask = nft_genmask_cur(net);
+ const u8 *rp = (const u8 *)key;
+@@ -422,15 +420,17 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+
+ local_bh_disable();
+
+- map_index = raw_cpu_read(nft_pipapo_scratch_index);
+-
+ m = rcu_dereference(priv->match);
+
+ if (unlikely(!m || !*raw_cpu_ptr(m->scratch)))
+ goto out;
+
+- res_map = *raw_cpu_ptr(m->scratch) + (map_index ? m->bsize_max : 0);
+- fill_map = *raw_cpu_ptr(m->scratch) + (map_index ? 0 : m->bsize_max);
++ scratch = *raw_cpu_ptr(m->scratch);
++
++ map_index = scratch->map_index;
++
++ res_map = scratch->map + (map_index ? m->bsize_max : 0);
++ fill_map = scratch->map + (map_index ? 0 : m->bsize_max);
+
+ memset(res_map, 0xff, m->bsize_max * sizeof(*res_map));
+
+@@ -460,7 +460,7 @@ next_match:
+ b = pipapo_refill(res_map, f->bsize, f->rules, fill_map, f->mt,
+ last);
+ if (b < 0) {
+- raw_cpu_write(nft_pipapo_scratch_index, map_index);
++ scratch->map_index = map_index;
+ local_bh_enable();
+
+ return false;
+@@ -477,7 +477,7 @@ next_match:
+ * current inactive bitmap is clean and can be reused as
+ * *next* bitmap (not initial) for the next packet.
+ */
+- raw_cpu_write(nft_pipapo_scratch_index, map_index);
++ scratch->map_index = map_index;
+ local_bh_enable();
+
+ return true;
+@@ -1101,6 +1101,25 @@ static void pipapo_map(struct nft_pipapo_match *m,
+ f->mt[map[i].to + j].e = e;
+ }
+
++/**
++ * pipapo_free_scratch() - Free per-CPU map at original (not aligned) address
++ * @m: Matching data
++ * @cpu: CPU number
++ */
++static void pipapo_free_scratch(const struct nft_pipapo_match *m, unsigned int cpu)
++{
++ struct nft_pipapo_scratch *s;
++ void *mem;
++
++ s = *per_cpu_ptr(m->scratch, cpu);
++ if (!s)
++ return;
++
++ mem = s;
++ mem -= s->align_off;
++ kfree(mem);
++}
++
+ /**
+ * pipapo_realloc_scratch() - Reallocate scratch maps for partial match results
+ * @clone: Copy of matching data with pending insertions and deletions
+@@ -1114,12 +1133,13 @@ static int pipapo_realloc_scratch(struct nft_pipapo_match *clone,
+ int i;
+
+ for_each_possible_cpu(i) {
+- unsigned long *scratch;
++ struct nft_pipapo_scratch *scratch;
+ #ifdef NFT_PIPAPO_ALIGN
+- unsigned long *scratch_aligned;
++ void *scratch_aligned;
++ u32 align_off;
+ #endif
+-
+- scratch = kzalloc_node(bsize_max * sizeof(*scratch) * 2 +
++ scratch = kzalloc_node(struct_size(scratch, map,
++ bsize_max * 2) +
+ NFT_PIPAPO_ALIGN_HEADROOM,
+ GFP_KERNEL, cpu_to_node(i));
+ if (!scratch) {
+@@ -1133,14 +1153,25 @@ static int pipapo_realloc_scratch(struct nft_pipapo_match *clone,
+ return -ENOMEM;
+ }
+
+- kfree(*per_cpu_ptr(clone->scratch, i));
+-
+- *per_cpu_ptr(clone->scratch, i) = scratch;
++ pipapo_free_scratch(clone, i);
+
+ #ifdef NFT_PIPAPO_ALIGN
+- scratch_aligned = NFT_PIPAPO_LT_ALIGN(scratch);
+- *per_cpu_ptr(clone->scratch_aligned, i) = scratch_aligned;
++ /* Align &scratch->map (not the struct itself): the extra
++ * %NFT_PIPAPO_ALIGN_HEADROOM bytes passed to kzalloc_node()
++ * above guarantee we can waste up to those bytes in order
++ * to align the map field regardless of its offset within
++ * the struct.
++ */
++ BUILD_BUG_ON(offsetof(struct nft_pipapo_scratch, map) > NFT_PIPAPO_ALIGN_HEADROOM);
++
++ scratch_aligned = NFT_PIPAPO_LT_ALIGN(&scratch->map);
++ scratch_aligned -= offsetof(struct nft_pipapo_scratch, map);
++ align_off = scratch_aligned - (void *)scratch;
++
++ scratch = scratch_aligned;
++ scratch->align_off = align_off;
+ #endif
++ *per_cpu_ptr(clone->scratch, i) = scratch;
+ }
+
+ return 0;
+@@ -1294,11 +1325,6 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
+ if (!new->scratch)
+ goto out_scratch;
+
+-#ifdef NFT_PIPAPO_ALIGN
+- new->scratch_aligned = alloc_percpu(*new->scratch_aligned);
+- if (!new->scratch_aligned)
+- goto out_scratch;
+-#endif
+ for_each_possible_cpu(i)
+ *per_cpu_ptr(new->scratch, i) = NULL;
+
+@@ -1350,10 +1376,7 @@ out_lt:
+ }
+ out_scratch_realloc:
+ for_each_possible_cpu(i)
+- kfree(*per_cpu_ptr(new->scratch, i));
+-#ifdef NFT_PIPAPO_ALIGN
+- free_percpu(new->scratch_aligned);
+-#endif
++ pipapo_free_scratch(new, i);
+ out_scratch:
+ free_percpu(new->scratch);
+ kfree(new);
+@@ -1638,13 +1661,9 @@ static void pipapo_free_match(struct nft_pipapo_match *m)
+ int i;
+
+ for_each_possible_cpu(i)
+- kfree(*per_cpu_ptr(m->scratch, i));
++ pipapo_free_scratch(m, i);
+
+-#ifdef NFT_PIPAPO_ALIGN
+- free_percpu(m->scratch_aligned);
+-#endif
+ free_percpu(m->scratch);
+-
+ pipapo_free_fields(m);
+
+ kfree(m);
+@@ -2132,7 +2151,7 @@ static int nft_pipapo_init(const struct nft_set *set,
+ m->field_count = field_count;
+ m->bsize_max = 0;
+
+- m->scratch = alloc_percpu(unsigned long *);
++ m->scratch = alloc_percpu(struct nft_pipapo_scratch *);
+ if (!m->scratch) {
+ err = -ENOMEM;
+ goto out_scratch;
+@@ -2140,16 +2159,6 @@ static int nft_pipapo_init(const struct nft_set *set,
+ for_each_possible_cpu(i)
+ *per_cpu_ptr(m->scratch, i) = NULL;
+
+-#ifdef NFT_PIPAPO_ALIGN
+- m->scratch_aligned = alloc_percpu(unsigned long *);
+- if (!m->scratch_aligned) {
+- err = -ENOMEM;
+- goto out_free;
+- }
+- for_each_possible_cpu(i)
+- *per_cpu_ptr(m->scratch_aligned, i) = NULL;
+-#endif
+-
+ rcu_head_init(&m->rcu);
+
+ nft_pipapo_for_each_field(f, i, m) {
+@@ -2180,9 +2189,6 @@ static int nft_pipapo_init(const struct nft_set *set,
+ return 0;
+
+ out_free:
+-#ifdef NFT_PIPAPO_ALIGN
+- free_percpu(m->scratch_aligned);
+-#endif
+ free_percpu(m->scratch);
+ out_scratch:
+ kfree(m);
+@@ -2236,11 +2242,8 @@ static void nft_pipapo_destroy(const struct nft_ctx *ctx,
+
+ nft_set_pipapo_match_destroy(ctx, set, m);
+
+-#ifdef NFT_PIPAPO_ALIGN
+- free_percpu(m->scratch_aligned);
+-#endif
+ for_each_possible_cpu(cpu)
+- kfree(*per_cpu_ptr(m->scratch, cpu));
++ pipapo_free_scratch(m, cpu);
+ free_percpu(m->scratch);
+ pipapo_free_fields(m);
+ kfree(m);
+@@ -2253,11 +2256,8 @@ static void nft_pipapo_destroy(const struct nft_ctx *ctx,
+ if (priv->dirty)
+ nft_set_pipapo_match_destroy(ctx, set, m);
+
+-#ifdef NFT_PIPAPO_ALIGN
+- free_percpu(priv->clone->scratch_aligned);
+-#endif
+ for_each_possible_cpu(cpu)
+- kfree(*per_cpu_ptr(priv->clone->scratch, cpu));
++ pipapo_free_scratch(priv->clone, cpu);
+ free_percpu(priv->clone->scratch);
+
+ pipapo_free_fields(priv->clone);
+diff --git a/net/netfilter/nft_set_pipapo.h b/net/netfilter/nft_set_pipapo.h
+index 25a75591583eb..30a3d092cd841 100644
+--- a/net/netfilter/nft_set_pipapo.h
++++ b/net/netfilter/nft_set_pipapo.h
+@@ -130,21 +130,29 @@ struct nft_pipapo_field {
+ union nft_pipapo_map_bucket *mt;
+ };
+
++/**
++ * struct nft_pipapo_scratch - percpu data used for lookup and matching
++ * @map_index: Current working bitmap index, toggled between field matches
++ * @align_off: Offset to get the originally allocated address
++ * @map: store partial matching results during lookup
++ */
++struct nft_pipapo_scratch {
++ u8 map_index;
++ u32 align_off;
++ unsigned long map[];
++};
++
+ /**
+ * struct nft_pipapo_match - Data used for lookup and matching
+ * @field_count Amount of fields in set
+ * @scratch: Preallocated per-CPU maps for partial matching results
+- * @scratch_aligned: Version of @scratch aligned to NFT_PIPAPO_ALIGN bytes
+ * @bsize_max: Maximum lookup table bucket size of all fields, in longs
+ * @rcu Matching data is swapped on commits
+ * @f: Fields, with lookup and mapping tables
+ */
+ struct nft_pipapo_match {
+ int field_count;
+-#ifdef NFT_PIPAPO_ALIGN
+- unsigned long * __percpu *scratch_aligned;
+-#endif
+- unsigned long * __percpu *scratch;
++ struct nft_pipapo_scratch * __percpu *scratch;
+ size_t bsize_max;
+ struct rcu_head rcu;
+ struct nft_pipapo_field f[];
+diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c
+index 52e0d026d30ad..90e275bb3e5d7 100644
+--- a/net/netfilter/nft_set_pipapo_avx2.c
++++ b/net/netfilter/nft_set_pipapo_avx2.c
+@@ -71,9 +71,6 @@
+ #define NFT_PIPAPO_AVX2_ZERO(reg) \
+ asm volatile("vpxor %ymm" #reg ", %ymm" #reg ", %ymm" #reg)
+
+-/* Current working bitmap index, toggled between field matches */
+-static DEFINE_PER_CPU(bool, nft_pipapo_avx2_scratch_index);
+-
+ /**
+ * nft_pipapo_avx2_prepare() - Prepare before main algorithm body
+ *
+@@ -1120,11 +1117,12 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext)
+ {
+ struct nft_pipapo *priv = nft_set_priv(set);
+- unsigned long *res, *fill, *scratch;
++ struct nft_pipapo_scratch *scratch;
+ u8 genmask = nft_genmask_cur(net);
+ const u8 *rp = (const u8 *)key;
+ struct nft_pipapo_match *m;
+ struct nft_pipapo_field *f;
++ unsigned long *res, *fill;
+ bool map_index;
+ int i, ret = 0;
+
+@@ -1141,15 +1139,16 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ */
+ kernel_fpu_begin_mask(0);
+
+- scratch = *raw_cpu_ptr(m->scratch_aligned);
++ scratch = *raw_cpu_ptr(m->scratch);
+ if (unlikely(!scratch)) {
+ kernel_fpu_end();
+ return false;
+ }
+- map_index = raw_cpu_read(nft_pipapo_avx2_scratch_index);
+
+- res = scratch + (map_index ? m->bsize_max : 0);
+- fill = scratch + (map_index ? 0 : m->bsize_max);
++ map_index = scratch->map_index;
++
++ res = scratch->map + (map_index ? m->bsize_max : 0);
++ fill = scratch->map + (map_index ? 0 : m->bsize_max);
+
+ /* Starting map doesn't need to be set for this implementation */
+
+@@ -1221,7 +1220,7 @@ next_match:
+
+ out:
+ if (i % 2)
+- raw_cpu_write(nft_pipapo_avx2_scratch_index, !map_index);
++ scratch->map_index = !map_index;
+ kernel_fpu_end();
+
+ return ret >= 0;
+diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
+index e34662f4a71e0..5bf5572e945cc 100644
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -235,7 +235,7 @@ static void nft_rbtree_gc_remove(struct net *net, struct nft_set *set,
+
+ static const struct nft_rbtree_elem *
+ nft_rbtree_gc_elem(const struct nft_set *__set, struct nft_rbtree *priv,
+- struct nft_rbtree_elem *rbe, u8 genmask)
++ struct nft_rbtree_elem *rbe)
+ {
+ struct nft_set *set = (struct nft_set *)__set;
+ struct rb_node *prev = rb_prev(&rbe->node);
+@@ -254,7 +254,7 @@ nft_rbtree_gc_elem(const struct nft_set *__set, struct nft_rbtree *priv,
+ while (prev) {
+ rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
+ if (nft_rbtree_interval_end(rbe_prev) &&
+- nft_set_elem_active(&rbe_prev->ext, genmask))
++ nft_set_elem_active(&rbe_prev->ext, NFT_GENMASK_ANY))
+ break;
+
+ prev = rb_prev(prev);
+@@ -365,7 +365,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
+ nft_set_elem_active(&rbe->ext, cur_genmask)) {
+ const struct nft_rbtree_elem *removed_end;
+
+- removed_end = nft_rbtree_gc_elem(set, priv, rbe, genmask);
++ removed_end = nft_rbtree_gc_elem(set, priv, rbe);
+ if (IS_ERR(removed_end))
+ return PTR_ERR(removed_end);
+
+diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
+index aab0697013982..5d91ef562ff78 100644
+--- a/net/rxrpc/conn_event.c
++++ b/net/rxrpc/conn_event.c
+@@ -41,6 +41,14 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
+
+ _enter("%d", conn->debug_id);
+
++ if (sp && sp->hdr.type == RXRPC_PACKET_TYPE_ACK) {
++ if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
++ &pkt.ack, sizeof(pkt.ack)) < 0)
++ return;
++ if (pkt.ack.reason == RXRPC_ACK_PING_RESPONSE)
++ return;
++ }
++
+ chan = &conn->channels[channel];
+
+ /* If the last call got moved on whilst we were waiting to run, just
+diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
+index cdcd2731860ba..1cb9935620886 100644
+--- a/net/tipc/bearer.c
++++ b/net/tipc/bearer.c
+@@ -1088,6 +1088,12 @@ int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info)
+
+ #ifdef CONFIG_TIPC_MEDIA_UDP
+ if (attrs[TIPC_NLA_BEARER_UDP_OPTS]) {
++ if (b->media->type_id != TIPC_MEDIA_TYPE_UDP) {
++ rtnl_unlock();
++ NL_SET_ERR_MSG(info->extack, "UDP option is unsupported");
++ return -EINVAL;
++ }
++
+ err = tipc_udp_nl_bearer_add(b,
+ attrs[TIPC_NLA_BEARER_UDP_OPTS]);
+ if (err) {
+diff --git a/net/unix/garbage.c b/net/unix/garbage.c
+index dc27635403932..767b338a7a2d4 100644
+--- a/net/unix/garbage.c
++++ b/net/unix/garbage.c
+@@ -314,6 +314,17 @@ void unix_gc(void)
+ /* Here we are. Hitlist is filled. Die. */
+ __skb_queue_purge(&hitlist);
+
++#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
++ list_for_each_entry_safe(u, next, &gc_candidates, link) {
++ struct sk_buff *skb = u->oob_skb;
++
++ if (skb) {
++ u->oob_skb = NULL;
++ kfree_skb(skb);
++ }
++ }
++#endif
++
+ spin_lock(&unix_gc_lock);
+
+ /* There could be io_uring registered files, just push them back to
+diff --git a/sound/soc/amd/acp-config.c b/sound/soc/amd/acp-config.c
+index 9ee71a99a0871..0932473b63945 100644
+--- a/sound/soc/amd/acp-config.c
++++ b/sound/soc/amd/acp-config.c
+@@ -3,7 +3,7 @@
+ // This file is provided under a dual BSD/GPLv2 license. When using or
+ // redistributing this file, you may do so under either license.
+ //
+-// Copyright(c) 2021, 2023 Advanced Micro Devices, Inc.
++// Copyright(c) 2021 Advanced Micro Devices, Inc.
+ //
+ // Authors: Ajit Kumar Pandey <AjitKumar.Pandey@amd.com>
+ //
+@@ -35,19 +35,6 @@ static const struct config_entry config_table[] = {
+ {}
+ },
+ },
+- {
+- .flags = FLAG_AMD_LEGACY,
+- .device = ACP_PCI_DEV_ID,
+- .dmi_table = (const struct dmi_system_id []) {
+- {
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Valve"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "Jupiter"),
+- },
+- },
+- {}
+- },
+- },
+ {
+ .flags = FLAG_AMD_SOF,
+ .device = ACP_PCI_DEV_ID,
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 33380cad3a735..b8a474a2e4d59 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2029,10 +2029,14 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_IGNORE_CTL_ERROR),
+ DEVICE_FLG(0x0499, 0x1509, /* Steinberg UR22 */
+ QUIRK_FLAG_GENERIC_IMPLICIT_FB),
++ DEVICE_FLG(0x0499, 0x3108, /* Yamaha YIT-W12TX */
++ QUIRK_FLAG_GET_SAMPLE_RATE),
+ DEVICE_FLG(0x04d8, 0xfeea, /* Benchmark DAC1 Pre */
+ QUIRK_FLAG_GET_SAMPLE_RATE),
+ DEVICE_FLG(0x04e8, 0xa051, /* Samsung USBC Headset (AKG) */
+ QUIRK_FLAG_SKIP_CLOCK_SELECTOR | QUIRK_FLAG_CTL_MSG_DELAY_5M),
++ DEVICE_FLG(0x0525, 0xa4ad, /* Hamedal C20 usb camero */
++ QUIRK_FLAG_IFACE_SKIP_CLOSE),
+ DEVICE_FLG(0x054c, 0x0b8c, /* Sony WALKMAN NW-A45 DAC */
+ QUIRK_FLAG_SET_IFACE_FIRST),
+ DEVICE_FLG(0x0556, 0x0014, /* Phoenix Audio TMX320VC */
+@@ -2071,14 +2075,22 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+ DEVICE_FLG(0x0763, 0x2031, /* M-Audio Fast Track C600 */
+ QUIRK_FLAG_GENERIC_IMPLICIT_FB),
++ DEVICE_FLG(0x07fd, 0x000b, /* MOTU M Series 2nd hardware revision */
++ QUIRK_FLAG_CTL_MSG_DELAY_1M),
+ DEVICE_FLG(0x08bb, 0x2702, /* LineX FM Transmitter */
+ QUIRK_FLAG_IGNORE_CTL_ERROR),
+ DEVICE_FLG(0x0951, 0x16ad, /* Kingston HyperX */
+ QUIRK_FLAG_CTL_MSG_DELAY_1M),
+ DEVICE_FLG(0x0b0e, 0x0349, /* Jabra 550a */
+ QUIRK_FLAG_CTL_MSG_DELAY_1M),
++ DEVICE_FLG(0x0ecb, 0x205c, /* JBL Quantum610 Wireless */
++ QUIRK_FLAG_FIXED_RATE),
++ DEVICE_FLG(0x0ecb, 0x2069, /* JBL Quantum810 Wireless */
++ QUIRK_FLAG_FIXED_RATE),
+ DEVICE_FLG(0x0fd9, 0x0008, /* Hauppauge HVR-950Q */
+ QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER),
++ DEVICE_FLG(0x1224, 0x2a25, /* Jieli Technology USB PHY 2.0 */
++ QUIRK_FLAG_GET_SAMPLE_RATE),
+ DEVICE_FLG(0x1395, 0x740a, /* Sennheiser DECT */
+ QUIRK_FLAG_GET_SAMPLE_RATE),
+ DEVICE_FLG(0x1397, 0x0507, /* Behringer UMC202HD */
+@@ -2111,6 +2123,10 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY),
+ DEVICE_FLG(0x1901, 0x0191, /* GE B850V3 CP2114 audio interface */
+ QUIRK_FLAG_GET_SAMPLE_RATE),
++ DEVICE_FLG(0x19f7, 0x0035, /* RODE NT-USB+ */
++ QUIRK_FLAG_GET_SAMPLE_RATE),
++ DEVICE_FLG(0x1bcf, 0x2283, /* NexiGo N930AF FHD Webcam */
++ QUIRK_FLAG_GET_SAMPLE_RATE),
+ DEVICE_FLG(0x2040, 0x7200, /* Hauppauge HVR-950Q */
+ QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER),
+ DEVICE_FLG(0x2040, 0x7201, /* Hauppauge HVR-950Q-MXL */
+@@ -2153,6 +2169,12 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ QUIRK_FLAG_IGNORE_CTL_ERROR),
+ DEVICE_FLG(0x2912, 0x30c8, /* Audioengine D1 */
+ QUIRK_FLAG_GET_SAMPLE_RATE),
++ DEVICE_FLG(0x2b53, 0x0023, /* Fiero SC-01 (firmware v1.0.0 @ 48 kHz) */
++ QUIRK_FLAG_GENERIC_IMPLICIT_FB),
++ DEVICE_FLG(0x2b53, 0x0024, /* Fiero SC-01 (firmware v1.0.0 @ 96 kHz) */
++ QUIRK_FLAG_GENERIC_IMPLICIT_FB),
++ DEVICE_FLG(0x2b53, 0x0031, /* Fiero SC-01 (firmware v1.1.0) */
++ QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+ DEVICE_FLG(0x30be, 0x0101, /* Schiit Hel */
+ QUIRK_FLAG_IGNORE_CTL_ERROR),
+ DEVICE_FLG(0x413c, 0xa506, /* Dell AE515 sound bar */
+@@ -2161,22 +2183,6 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ QUIRK_FLAG_ALIGN_TRANSFER),
+ DEVICE_FLG(0x534d, 0x2109, /* MacroSilicon MS2109 */
+ QUIRK_FLAG_ALIGN_TRANSFER),
+- DEVICE_FLG(0x1224, 0x2a25, /* Jieli Technology USB PHY 2.0 */
+- QUIRK_FLAG_GET_SAMPLE_RATE),
+- DEVICE_FLG(0x2b53, 0x0023, /* Fiero SC-01 (firmware v1.0.0 @ 48 kHz) */
+- QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+- DEVICE_FLG(0x2b53, 0x0024, /* Fiero SC-01 (firmware v1.0.0 @ 96 kHz) */
+- QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+- DEVICE_FLG(0x2b53, 0x0031, /* Fiero SC-01 (firmware v1.1.0) */
+- QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+- DEVICE_FLG(0x0525, 0xa4ad, /* Hamedal C20 usb camero */
+- QUIRK_FLAG_IFACE_SKIP_CLOSE),
+- DEVICE_FLG(0x0ecb, 0x205c, /* JBL Quantum610 Wireless */
+- QUIRK_FLAG_FIXED_RATE),
+- DEVICE_FLG(0x0ecb, 0x2069, /* JBL Quantum810 Wireless */
+- QUIRK_FLAG_FIXED_RATE),
+- DEVICE_FLG(0x1bcf, 0x2283, /* NexiGo N930AF FHD Webcam */
+- QUIRK_FLAG_GET_SAMPLE_RATE),
+
+ /* Vendor matches */
+ VENDOR_FLG(0x045e, /* MS Lifecam */
+diff --git a/tools/testing/selftests/net/cmsg_ipv6.sh b/tools/testing/selftests/net/cmsg_ipv6.sh
+index 330d0b1ceced3..c921750ca118d 100755
+--- a/tools/testing/selftests/net/cmsg_ipv6.sh
++++ b/tools/testing/selftests/net/cmsg_ipv6.sh
+@@ -91,7 +91,7 @@ for ovr in setsock cmsg both diff; do
+ check_result $? 0 "TCLASS $prot $ovr - pass"
+
+ while [ -d /proc/$BG ]; do
+- $NSEXE ./cmsg_sender -6 -p u $TGT6 1234
++ $NSEXE ./cmsg_sender -6 -p $p $m $((TOS2)) $TGT6 1234
+ done
+
+ tcpdump -r $TMPF -v 2>&1 | grep "class $TOS2" >> /dev/null
+@@ -128,7 +128,7 @@ for ovr in setsock cmsg both diff; do
+ check_result $? 0 "HOPLIMIT $prot $ovr - pass"
+
+ while [ -d /proc/$BG ]; do
+- $NSEXE ./cmsg_sender -6 -p u $TGT6 1234
++ $NSEXE ./cmsg_sender -6 -p $p $m $LIM $TGT6 1234
+ done
+
+ tcpdump -r $TMPF -v 2>&1 | grep "hlim $LIM[^0-9]" >> /dev/null
+diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
+index 1b6e484e586dc..00ab4c6e40446 100755
+--- a/tools/testing/selftests/net/pmtu.sh
++++ b/tools/testing/selftests/net/pmtu.sh
+@@ -1928,6 +1928,13 @@ check_command() {
+ return 0
+ }
+
++check_running() {
++ pid=${1}
++ cmd=${2}
++
++ [ "$(cat /proc/${pid}/cmdline 2>/dev/null | tr -d '\0')" = "{cmd}" ]
++}
++
+ test_cleanup_vxlanX_exception() {
+ outer="${1}"
+ encap="vxlan"
+@@ -1958,11 +1965,12 @@ test_cleanup_vxlanX_exception() {
+
+ ${ns_a} ip link del dev veth_A-R1 &
+ iplink_pid=$!
+- sleep 1
+- if [ "$(cat /proc/${iplink_pid}/cmdline 2>/dev/null | tr -d '\0')" = "iplinkdeldevveth_A-R1" ]; then
+- err " can't delete veth device in a timely manner, PMTU dst likely leaked"
+- return 1
+- fi
++ for i in $(seq 1 20); do
++ check_running ${iplink_pid} "iplinkdeldevveth_A-R1" || return 0
++ sleep 0.1
++ done
++ err " can't delete veth device in a timely manner, PMTU dst likely leaked"
++ return 1
+ }
+
+ test_cleanup_ipv6_exception() {
+diff --git a/tools/testing/selftests/net/udpgro_fwd.sh b/tools/testing/selftests/net/udpgro_fwd.sh
+index c079565add392..9690a5d7ffd7d 100755
+--- a/tools/testing/selftests/net/udpgro_fwd.sh
++++ b/tools/testing/selftests/net/udpgro_fwd.sh
+@@ -37,6 +37,10 @@ create_ns() {
+ for ns in $NS_SRC $NS_DST; do
+ ip netns add $ns
+ ip -n $ns link set dev lo up
++
++ # disable route solicitations to decrease 'noise' traffic
++ ip netns exec $ns sysctl -qw net.ipv6.conf.default.router_solicitations=0
++ ip netns exec $ns sysctl -qw net.ipv6.conf.all.router_solicitations=0
+ done
+
+ ip link add name veth$SRC type veth peer name veth$DST
+@@ -78,6 +82,12 @@ create_vxlan_pair() {
+ create_vxlan_endpoint $BASE$ns veth$ns $BM_NET_V6$((3 - $ns)) vxlan6$ns 6
+ ip -n $BASE$ns addr add dev vxlan6$ns $OL_NET_V6$ns/24 nodad
+ done
++
++ # preload neighbur cache, do avoid some noisy traffic
++ local addr_dst=$(ip -j -n $BASE$DST link show dev vxlan6$DST |jq -r '.[]["address"]')
++ local addr_src=$(ip -j -n $BASE$SRC link show dev vxlan6$SRC |jq -r '.[]["address"]')
++ ip -n $BASE$DST neigh add dev vxlan6$DST lladdr $addr_src $OL_NET_V6$SRC
++ ip -n $BASE$SRC neigh add dev vxlan6$SRC lladdr $addr_dst $OL_NET_V6$DST
+ }
+
+ is_ipv6() {
+@@ -117,7 +127,7 @@ run_test() {
+ # not enable GRO
+ ip netns exec $NS_DST $ipt -A INPUT -p udp --dport 4789
+ ip netns exec $NS_DST $ipt -A INPUT -p udp --dport 8000
+- ip netns exec $NS_DST ./udpgso_bench_rx -C 1000 -R 10 -n 10 -l 1300 $rx_args &
++ ip netns exec $NS_DST ./udpgso_bench_rx -C 2000 -R 100 -n 10 -l 1300 $rx_args &
+ local spid=$!
+ sleep 0.1
+ ip netns exec $NS_SRC ./udpgso_bench_tx $family -M 1 -s 13000 -S 1300 -D $dst
+@@ -166,7 +176,7 @@ run_bench() {
+ # bind the sender and the receiver to different CPUs to try
+ # get reproducible results
+ ip netns exec $NS_DST bash -c "echo 2 > /sys/class/net/veth$DST/queues/rx-0/rps_cpus"
+- ip netns exec $NS_DST taskset 0x2 ./udpgso_bench_rx -C 1000 -R 10 &
++ ip netns exec $NS_DST taskset 0x2 ./udpgso_bench_rx -C 2000 -R 100 &
+ local spid=$!
+ sleep 0.1
+ ip netns exec $NS_SRC taskset 0x1 ./udpgso_bench_tx $family -l 3 -S 1300 -D $dst
+diff --git a/tools/testing/selftests/net/udpgso_bench_rx.c b/tools/testing/selftests/net/udpgso_bench_rx.c
+index f35a924d4a303..1cbadd267c963 100644
+--- a/tools/testing/selftests/net/udpgso_bench_rx.c
++++ b/tools/testing/selftests/net/udpgso_bench_rx.c
+@@ -375,7 +375,7 @@ static void do_recv(void)
+ do_flush_udp(fd);
+
+ tnow = gettimeofday_ms();
+- if (tnow > treport) {
++ if (!cfg_expected_pkt_nr && tnow > treport) {
+ if (packets)
+ fprintf(stderr,
+ "%s rx: %6lu MB/s %8lu calls/s\n",