author     Mike Pagano <mpagano@gentoo.org>  2020-04-21 07:22:45 -0400
committer  Mike Pagano <mpagano@gentoo.org>  2020-04-21 07:22:45 -0400
commit     448b36f25e511a3d58d54b0fb283fa5577cc805f (patch)
tree       33bbd502150adcc616bdaae0c4291a02c62c53d8
parent     Update distro Kconfig to support needed options for elogind (diff)
Linux patch 5.5.19
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README                 4
-rw-r--r--  1018_linux-5.5.19.patch  2518
2 files changed, 2522 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index e488875f..7beb30dc 100644
--- a/0000_README
+++ b/0000_README
@@ -115,6 +115,10 @@ Patch: 1017_linux-5.5.18.patch
From: http://www.kernel.org
Desc: Linux 5.5.18
+Patch: 1018_linux-5.5.19.patch
+From: http://www.kernel.org
+Desc: Linux 5.5.19
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1018_linux-5.5.19.patch b/1018_linux-5.5.19.patch
new file mode 100644
index 00000000..a7dc99c3
--- /dev/null
+++ b/1018_linux-5.5.19.patch
@@ -0,0 +1,2518 @@
+diff --git a/Makefile b/Makefile
+index 0dfb52e860b7..0c685a280e14 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 5
+-SUBLEVEL = 18
++SUBLEVEL = 19
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/arm/boot/dts/imx7-colibri.dtsi b/arch/arm/boot/dts/imx7-colibri.dtsi
+index 04717cf69db0..9bad960f2b39 100644
+--- a/arch/arm/boot/dts/imx7-colibri.dtsi
++++ b/arch/arm/boot/dts/imx7-colibri.dtsi
+@@ -345,7 +345,7 @@
+ &iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio1 &pinctrl_gpio2 &pinctrl_gpio3 &pinctrl_gpio4
+- &pinctrl_gpio7>;
++ &pinctrl_gpio7 &pinctrl_usbc_det>;
+
+ pinctrl_gpio1: gpio1-grp {
+ fsl,pins = <
+@@ -450,7 +450,6 @@
+
+ pinctrl_enet1: enet1grp {
+ fsl,pins = <
+- MX7D_PAD_ENET1_CRS__GPIO7_IO14 0x14
+ MX7D_PAD_ENET1_RGMII_RX_CTL__ENET1_RGMII_RX_CTL 0x73
+ MX7D_PAD_ENET1_RGMII_RD0__ENET1_RGMII_RD0 0x73
+ MX7D_PAD_ENET1_RGMII_RD1__ENET1_RGMII_RD1 0x73
+@@ -648,6 +647,12 @@
+ >;
+ };
+
++ pinctrl_usbc_det: gpio-usbc-det {
++ fsl,pins = <
++ MX7D_PAD_ENET1_CRS__GPIO7_IO14 0x14
++ >;
++ };
++
+ pinctrl_usbh_reg: gpio-usbh-vbus {
+ fsl,pins = <
+ MX7D_PAD_UART3_CTS_B__GPIO4_IO7 0x14 /* SODIMM 129 USBH PEN */
+diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts b/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts
+index 596bc65f475c..b2cf6f192525 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts
+@@ -743,6 +743,7 @@
+ };
+
+ &usb3_phy0 {
++ vbus-supply = <&reg_5v_p>;
+ status = "okay";
+ };
+
+diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
+index 354b11e27c07..033a48f30dbb 100644
+--- a/arch/arm64/kernel/vdso.c
++++ b/arch/arm64/kernel/vdso.c
+@@ -260,18 +260,7 @@ static int __aarch32_alloc_vdso_pages(void)
+ if (ret)
+ return ret;
+
+- ret = aarch32_alloc_kuser_vdso_page();
+- if (ret) {
+- unsigned long c_vvar =
+- (unsigned long)page_to_virt(aarch32_vdso_pages[C_VVAR]);
+- unsigned long c_vdso =
+- (unsigned long)page_to_virt(aarch32_vdso_pages[C_VDSO]);
+-
+- free_page(c_vvar);
+- free_page(c_vdso);
+- }
+-
+- return ret;
++ return aarch32_alloc_kuser_vdso_page();
+ }
+ #else
+ static int __aarch32_alloc_vdso_pages(void)
+diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
+index 209492849566..5c524d4f71cd 100644
+--- a/arch/x86/include/asm/microcode_amd.h
++++ b/arch/x86/include/asm/microcode_amd.h
+@@ -41,7 +41,7 @@ struct microcode_amd {
+ unsigned int mpb[0];
+ };
+
+-#define PATCH_MAX_SIZE PAGE_SIZE
++#define PATCH_MAX_SIZE (3 * PAGE_SIZE)
+
+ #ifdef CONFIG_MICROCODE_AMD
+ extern void __init load_ucode_amd_bsp(unsigned int family);
+diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
+index 89049b343c7a..d8cc5223b7ce 100644
+--- a/arch/x86/kernel/cpu/resctrl/core.c
++++ b/arch/x86/kernel/cpu/resctrl/core.c
+@@ -578,6 +578,8 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
+ d->id = id;
+ cpumask_set_cpu(cpu, &d->cpu_mask);
+
++ rdt_domain_reconfigure_cdp(r);
++
+ if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
+ kfree(d);
+ return;
+diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
+index 181c992f448c..3dd13f3a8b23 100644
+--- a/arch/x86/kernel/cpu/resctrl/internal.h
++++ b/arch/x86/kernel/cpu/resctrl/internal.h
+@@ -601,5 +601,6 @@ bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d);
+ void __check_limbo(struct rdt_domain *d, bool force_free);
+ bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r);
+ bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r);
++void rdt_domain_reconfigure_cdp(struct rdt_resource *r);
+
+ #endif /* _ASM_X86_RESCTRL_INTERNAL_H */
+diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+index 954fd048ad9b..20856d80dce3 100644
+--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
++++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+@@ -1769,6 +1769,19 @@ static int set_cache_qos_cfg(int level, bool enable)
+ return 0;
+ }
+
++/* Restore the qos cfg state when a domain comes online */
++void rdt_domain_reconfigure_cdp(struct rdt_resource *r)
++{
++ if (!r->alloc_capable)
++ return;
++
++ if (r == &rdt_resources_all[RDT_RESOURCE_L2DATA])
++ l2_qos_cfg_update(&r->alloc_enabled);
++
++ if (r == &rdt_resources_all[RDT_RESOURCE_L3DATA])
++ l3_qos_cfg_update(&r->alloc_enabled);
++}
++
+ /*
+ * Enable or disable the MBA software controller
+ * which helps user specify bandwidth in MBps.
+@@ -2993,7 +3006,8 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
+ * If the rdtgroup is a mon group and parent directory
+ * is a valid "mon_groups" directory, remove the mon group.
+ */
+- if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn) {
++ if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn &&
++ rdtgrp != &rdtgroup_default) {
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
+ rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
+ ret = rdtgroup_ctrl_remove(kn, rdtgrp);
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 9a82841c5b5a..0811d39820b2 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -1654,6 +1654,7 @@ static int acpi_ec_add(struct acpi_device *device)
+
+ if (boot_ec && ec->command_addr == boot_ec->command_addr &&
+ ec->data_addr == boot_ec->data_addr) {
++ boot_ec_is_ecdt = false;
+ /*
+ * Trust PNP0C09 namespace location rather than
+ * ECDT ID. But trust ECDT GPE rather than _GPE
+@@ -1673,12 +1674,9 @@ static int acpi_ec_add(struct acpi_device *device)
+
+ if (ec == boot_ec)
+ acpi_handle_info(boot_ec->handle,
+- "Boot %s EC initialization complete\n",
++ "Boot %s EC used to handle transactions and events\n",
+ boot_ec_is_ecdt ? "ECDT" : "DSDT");
+
+- acpi_handle_info(ec->handle,
+- "EC: Used to handle transactions and events\n");
+-
+ device->driver_data = ec;
+
+ ret = !!request_region(ec->data_addr, 1, "EC data");
+diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
+index a3320f93616d..d0090f71585c 100644
+--- a/drivers/acpi/nfit/core.c
++++ b/drivers/acpi/nfit/core.c
+@@ -360,7 +360,7 @@ static union acpi_object *acpi_label_info(acpi_handle handle)
+
+ static u8 nfit_dsm_revid(unsigned family, unsigned func)
+ {
+- static const u8 revid_table[NVDIMM_FAMILY_MAX+1][32] = {
++ static const u8 revid_table[NVDIMM_FAMILY_MAX+1][NVDIMM_CMD_MAX+1] = {
+ [NVDIMM_FAMILY_INTEL] = {
+ [NVDIMM_INTEL_GET_MODES] = 2,
+ [NVDIMM_INTEL_GET_FWINFO] = 2,
+@@ -386,7 +386,7 @@ static u8 nfit_dsm_revid(unsigned family, unsigned func)
+
+ if (family > NVDIMM_FAMILY_MAX)
+ return 0;
+- if (func > 31)
++ if (func > NVDIMM_CMD_MAX)
+ return 0;
+ id = revid_table[family][func];
+ if (id == 0)
+@@ -492,7 +492,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
+ * Check for a valid command. For ND_CMD_CALL, we also have to
+ * make sure that the DSM function is supported.
+ */
+- if (cmd == ND_CMD_CALL && !test_bit(func, &dsm_mask))
++ if (cmd == ND_CMD_CALL &&
++ (func > NVDIMM_CMD_MAX || !test_bit(func, &dsm_mask)))
+ return -ENOTTY;
+ else if (!test_bit(cmd, &cmd_mask))
+ return -ENOTTY;
+@@ -3492,7 +3493,8 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
+ if (nvdimm && cmd == ND_CMD_CALL &&
+ call_pkg->nd_family == NVDIMM_FAMILY_INTEL) {
+ func = call_pkg->nd_command;
+- if ((1 << func) & NVDIMM_INTEL_SECURITY_CMDMASK)
++ if (func > NVDIMM_CMD_MAX ||
++ (1 << func) & NVDIMM_INTEL_SECURITY_CMDMASK)
+ return -EOPNOTSUPP;
+ }
+
+diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
+index 24241941181c..b317f4043705 100644
+--- a/drivers/acpi/nfit/nfit.h
++++ b/drivers/acpi/nfit/nfit.h
+@@ -34,6 +34,7 @@
+ | ACPI_NFIT_MEM_NOT_ARMED | ACPI_NFIT_MEM_MAP_FAILED)
+
+ #define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_HYPERV
++#define NVDIMM_CMD_MAX 31
+
+ #define NVDIMM_STANDARD_CMDMASK \
+ (1 << ND_CMD_SMART | 1 << ND_CMD_SMART_THRESHOLD | 1 << ND_CMD_DIMM_FLAGS \
+diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
+index 22aede42a336..bda92980e015 100644
+--- a/drivers/clk/at91/clk-usb.c
++++ b/drivers/clk/at91/clk-usb.c
+@@ -211,7 +211,7 @@ _at91sam9x5_clk_register_usb(struct regmap *regmap, const char *name,
+
+ usb->hw.init = &init;
+ usb->regmap = regmap;
+- usb->usbs_mask = SAM9X5_USBS_MASK;
++ usb->usbs_mask = usbs_mask;
+
+ hw = &usb->hw;
+ ret = clk_hw_register(NULL, &usb->hw);
+diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
+index 77398aefeb6d..7338a3bc71eb 100644
+--- a/drivers/clk/at91/sam9x60.c
++++ b/drivers/clk/at91/sam9x60.c
+@@ -237,9 +237,8 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
+
+ parent_names[0] = "pllack";
+ parent_names[1] = "upllck";
+- parent_names[2] = "mainck";
+- parent_names[3] = "mainck";
+- hw = sam9x60_clk_register_usb(regmap, "usbck", parent_names, 4);
++ parent_names[2] = "main_osc";
++ hw = sam9x60_clk_register_usb(regmap, "usbck", parent_names, 3);
+ if (IS_ERR(hw))
+ goto err_free;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 7d340c9ec303..9a3a3a406f19 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2259,6 +2259,8 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
+ {
+ int i, r;
+
++ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
++ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (!adev->ip_blocks[i].status.valid)
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index f73dff68e799..2b4df0cea646 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -3804,9 +3804,12 @@ static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
+ {
+ uint32_t i;
+
++ /* force the trim if mclk_switching is disabled to prevent flicker */
++ bool force_trim = (low_limit == high_limit);
+ for (i = 0; i < dpm_table->count; i++) {
+ /*skip the trim if od is enabled*/
+- if (!hwmgr->od_enabled && (dpm_table->dpm_levels[i].value < low_limit
++ if ((!hwmgr->od_enabled || force_trim)
++ && (dpm_table->dpm_levels[i].value < low_limit
+ || dpm_table->dpm_levels[i].value > high_limit))
+ dpm_table->dpm_levels[i].enabled = false;
+ else
+diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
+index cc917200bdeb..5c4d687207f6 100644
+--- a/drivers/gpu/drm/i915/i915_perf.c
++++ b/drivers/gpu/drm/i915/i915_perf.c
+@@ -2906,49 +2906,6 @@ void i915_oa_init_reg_state(const struct intel_context *ce,
+ gen8_update_reg_state_unlocked(ce, stream);
+ }
+
+-/**
+- * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation
+- * @stream: An i915 perf stream
+- * @file: An i915 perf stream file
+- * @buf: destination buffer given by userspace
+- * @count: the number of bytes userspace wants to read
+- * @ppos: (inout) file seek position (unused)
+- *
+- * Besides wrapping &i915_perf_stream_ops->read this provides a common place to
+- * ensure that if we've successfully copied any data then reporting that takes
+- * precedence over any internal error status, so the data isn't lost.
+- *
+- * For example ret will be -ENOSPC whenever there is more buffered data than
+- * can be copied to userspace, but that's only interesting if we weren't able
+- * to copy some data because it implies the userspace buffer is too small to
+- * receive a single record (and we never split records).
+- *
+- * Another case with ret == -EFAULT is more of a grey area since it would seem
+- * like bad form for userspace to ask us to overrun its buffer, but the user
+- * knows best:
+- *
+- * http://yarchive.net/comp/linux/partial_reads_writes.html
+- *
+- * Returns: The number of bytes copied or a negative error code on failure.
+- */
+-static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream,
+- struct file *file,
+- char __user *buf,
+- size_t count,
+- loff_t *ppos)
+-{
+- /* Note we keep the offset (aka bytes read) separate from any
+- * error status so that the final check for whether we return
+- * the bytes read with a higher precedence than any error (see
+- * comment below) doesn't need to be handled/duplicated in
+- * stream->ops->read() implementations.
+- */
+- size_t offset = 0;
+- int ret = stream->ops->read(stream, buf, count, &offset);
+-
+- return offset ?: (ret ?: -EAGAIN);
+-}
+-
+ /**
+ * i915_perf_read - handles read() FOP for i915 perf stream FDs
+ * @file: An i915 perf stream file
+@@ -2974,7 +2931,8 @@ static ssize_t i915_perf_read(struct file *file,
+ {
+ struct i915_perf_stream *stream = file->private_data;
+ struct i915_perf *perf = stream->perf;
+- ssize_t ret;
++ size_t offset = 0;
++ int ret;
+
+ /* To ensure it's handled consistently we simply treat all reads of a
+ * disabled stream as an error. In particular it might otherwise lead
+@@ -2997,13 +2955,12 @@ static ssize_t i915_perf_read(struct file *file,
+ return ret;
+
+ mutex_lock(&perf->lock);
+- ret = i915_perf_read_locked(stream, file,
+- buf, count, ppos);
++ ret = stream->ops->read(stream, buf, count, &offset);
+ mutex_unlock(&perf->lock);
+- } while (ret == -EAGAIN);
++ } while (!offset && !ret);
+ } else {
+ mutex_lock(&perf->lock);
+- ret = i915_perf_read_locked(stream, file, buf, count, ppos);
++ ret = stream->ops->read(stream, buf, count, &offset);
+ mutex_unlock(&perf->lock);
+ }
+
+@@ -3014,15 +2971,15 @@ static ssize_t i915_perf_read(struct file *file,
+ * and read() returning -EAGAIN. Clearing the oa.pollin state here
+ * effectively ensures we back off until the next hrtimer callback
+ * before reporting another EPOLLIN event.
++ * The exception to this is if ops->read() returned -ENOSPC which means
++ * that more OA data is available than could fit in the user provided
++ * buffer. In this case we want the next poll() call to not block.
+ */
+- if (ret >= 0 || ret == -EAGAIN) {
+- /* Maybe make ->pollin per-stream state if we support multiple
+- * concurrent streams in the future.
+- */
++ if (ret != -ENOSPC)
+ stream->pollin = false;
+- }
+
+- return ret;
++ /* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, ... */
++ return offset ?: (ret ?: -EAGAIN);
+ }
+
+ static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
+diff --git a/drivers/hid/hid-lg-g15.c b/drivers/hid/hid-lg-g15.c
+index 8a9268a5c66a..ad4b5412a9f4 100644
+--- a/drivers/hid/hid-lg-g15.c
++++ b/drivers/hid/hid-lg-g15.c
+@@ -803,8 +803,10 @@ static int lg_g15_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ }
+
+ if (ret < 0) {
+- hid_err(hdev, "Error disabling keyboard emulation for the G-keys\n");
+- goto error_hw_stop;
++ hid_err(hdev, "Error %d disabling keyboard emulation for the G-keys, falling back to generic hid-input driver\n",
++ ret);
++ hid_set_drvdata(hdev, NULL);
++ return 0;
+ }
+
+ /* Get initial brightness levels */
+diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
+index 16dd338877d0..0c55c54372d7 100644
+--- a/drivers/i2c/busses/i2c-designware-platdrv.c
++++ b/drivers/i2c/busses/i2c-designware-platdrv.c
+@@ -370,10 +370,16 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
+ adap->dev.of_node = pdev->dev.of_node;
+ adap->nr = -1;
+
+- dev_pm_set_driver_flags(&pdev->dev,
+- DPM_FLAG_SMART_PREPARE |
+- DPM_FLAG_SMART_SUSPEND |
+- DPM_FLAG_LEAVE_SUSPENDED);
++ if (dev->flags & ACCESS_NO_IRQ_SUSPEND) {
++ dev_pm_set_driver_flags(&pdev->dev,
++ DPM_FLAG_SMART_PREPARE |
++ DPM_FLAG_LEAVE_SUSPENDED);
++ } else {
++ dev_pm_set_driver_flags(&pdev->dev,
++ DPM_FLAG_SMART_PREPARE |
++ DPM_FLAG_SMART_SUSPEND |
++ DPM_FLAG_LEAVE_SUSPENDED);
++ }
+
+ /* The code below assumes runtime PM to be disabled. */
+ WARN_ON(pm_runtime_enabled(&pdev->dev));
+diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
+index 8f6e6b08eadf..7e3ebf6ed2cd 100644
+--- a/drivers/irqchip/irq-ti-sci-inta.c
++++ b/drivers/irqchip/irq-ti-sci-inta.c
+@@ -37,6 +37,7 @@
+ #define VINT_ENABLE_SET_OFFSET 0x0
+ #define VINT_ENABLE_CLR_OFFSET 0x8
+ #define VINT_STATUS_OFFSET 0x18
++#define VINT_STATUS_MASKED_OFFSET 0x20
+
+ /**
+ * struct ti_sci_inta_event_desc - Description of an event coming to
+@@ -116,7 +117,7 @@ static void ti_sci_inta_irq_handler(struct irq_desc *desc)
+ chained_irq_enter(irq_desc_get_chip(desc), desc);
+
+ val = readq_relaxed(inta->base + vint_desc->vint_id * 0x1000 +
+- VINT_STATUS_OFFSET);
++ VINT_STATUS_MASKED_OFFSET);
+
+ for_each_set_bit(bit, &val, MAX_EVENTS_PER_VINT) {
+ virq = irq_find_mapping(domain, vint_desc->events[bit].hwirq);
+diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
+index 936b9b65acca..9cf5da89dbbb 100644
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -66,58 +66,6 @@ static const struct mt7530_mib_desc mt7530_mib[] = {
+ MIB_DESC(1, 0xb8, "RxArlDrop"),
+ };
+
+-static int
+-mt7623_trgmii_write(struct mt7530_priv *priv, u32 reg, u32 val)
+-{
+- int ret;
+-
+- ret = regmap_write(priv->ethernet, TRGMII_BASE(reg), val);
+- if (ret < 0)
+- dev_err(priv->dev,
+- "failed to priv write register\n");
+- return ret;
+-}
+-
+-static u32
+-mt7623_trgmii_read(struct mt7530_priv *priv, u32 reg)
+-{
+- int ret;
+- u32 val;
+-
+- ret = regmap_read(priv->ethernet, TRGMII_BASE(reg), &val);
+- if (ret < 0) {
+- dev_err(priv->dev,
+- "failed to priv read register\n");
+- return ret;
+- }
+-
+- return val;
+-}
+-
+-static void
+-mt7623_trgmii_rmw(struct mt7530_priv *priv, u32 reg,
+- u32 mask, u32 set)
+-{
+- u32 val;
+-
+- val = mt7623_trgmii_read(priv, reg);
+- val &= ~mask;
+- val |= set;
+- mt7623_trgmii_write(priv, reg, val);
+-}
+-
+-static void
+-mt7623_trgmii_set(struct mt7530_priv *priv, u32 reg, u32 val)
+-{
+- mt7623_trgmii_rmw(priv, reg, 0, val);
+-}
+-
+-static void
+-mt7623_trgmii_clear(struct mt7530_priv *priv, u32 reg, u32 val)
+-{
+- mt7623_trgmii_rmw(priv, reg, val, 0);
+-}
+-
+ static int
+ core_read_mmd_indirect(struct mt7530_priv *priv, int prtad, int devad)
+ {
+@@ -530,27 +478,6 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, int mode)
+ for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
+ mt7530_rmw(priv, MT7530_TRGMII_RD(i),
+ RD_TAP_MASK, RD_TAP(16));
+- else
+- if (priv->id != ID_MT7621)
+- mt7623_trgmii_set(priv, GSW_INTF_MODE,
+- INTF_MODE_TRGMII);
+-
+- return 0;
+-}
+-
+-static int
+-mt7623_pad_clk_setup(struct dsa_switch *ds)
+-{
+- struct mt7530_priv *priv = ds->priv;
+- int i;
+-
+- for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
+- mt7623_trgmii_write(priv, GSW_TRGMII_TD_ODT(i),
+- TD_DM_DRVP(8) | TD_DM_DRVN(8));
+-
+- mt7623_trgmii_set(priv, GSW_TRGMII_RCK_CTRL, RX_RST | RXC_DQSISEL);
+- mt7623_trgmii_clear(priv, GSW_TRGMII_RCK_CTRL, RX_RST);
+-
+ return 0;
+ }
+
+@@ -857,8 +784,9 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
+ */
+ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
+ MT7530_PORT_MATRIX_MODE);
+- mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
+- VLAN_ATTR(MT7530_VLAN_TRANSPARENT));
++ mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
++ VLAN_ATTR(MT7530_VLAN_TRANSPARENT) |
++ PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
+
+ for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ if (dsa_is_user_port(ds, i) &&
+@@ -874,8 +802,8 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
+ if (all_user_ports_removed) {
+ mt7530_write(priv, MT7530_PCR_P(MT7530_CPU_PORT),
+ PCR_MATRIX(dsa_user_ports(priv->ds)));
+- mt7530_write(priv, MT7530_PVC_P(MT7530_CPU_PORT),
+- PORT_SPEC_TAG);
++ mt7530_write(priv, MT7530_PVC_P(MT7530_CPU_PORT), PORT_SPEC_TAG
++ | PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
+ }
+ }
+
+@@ -901,8 +829,9 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
+ /* Set the port as a user port which is to be able to recognize VID
+ * from incoming packets before fetching entry within the VLAN table.
+ */
+- mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
+- VLAN_ATTR(MT7530_VLAN_USER));
++ mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
++ VLAN_ATTR(MT7530_VLAN_USER) |
++ PVC_EG_TAG(MT7530_VLAN_EG_DISABLED));
+ }
+
+ static void
+@@ -1255,10 +1184,6 @@ mt7530_setup(struct dsa_switch *ds)
+ dn = dsa_to_port(ds, MT7530_CPU_PORT)->master->dev.of_node->parent;
+
+ if (priv->id == ID_MT7530) {
+- priv->ethernet = syscon_node_to_regmap(dn);
+- if (IS_ERR(priv->ethernet))
+- return PTR_ERR(priv->ethernet);
+-
+ regulator_set_voltage(priv->core_pwr, 1000000, 1000000);
+ ret = regulator_enable(priv->core_pwr);
+ if (ret < 0) {
+@@ -1332,6 +1257,10 @@ mt7530_setup(struct dsa_switch *ds)
+ mt7530_cpu_port_enable(priv, i);
+ else
+ mt7530_port_disable(ds, i);
++
++ /* Enable consistent egress tag */
++ mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK,
++ PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
+ }
+
+ /* Setup port 5 */
+@@ -1420,14 +1349,6 @@ static void mt7530_phylink_mac_config(struct dsa_switch *ds, int port,
+ /* Setup TX circuit incluing relevant PAD and driving */
+ mt7530_pad_clk_setup(ds, state->interface);
+
+- if (priv->id == ID_MT7530) {
+- /* Setup RX circuit, relevant PAD and driving on the
+- * host which must be placed after the setup on the
+- * device side is all finished.
+- */
+- mt7623_pad_clk_setup(ds);
+- }
+-
+ priv->p6_interface = state->interface;
+ break;
+ default:
+diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
+index ccb9da8cad0d..756140b7dfd5 100644
+--- a/drivers/net/dsa/mt7530.h
++++ b/drivers/net/dsa/mt7530.h
+@@ -167,9 +167,16 @@ enum mt7530_port_mode {
+ /* Register for port vlan control */
+ #define MT7530_PVC_P(x) (0x2010 + ((x) * 0x100))
+ #define PORT_SPEC_TAG BIT(5)
++#define PVC_EG_TAG(x) (((x) & 0x7) << 8)
++#define PVC_EG_TAG_MASK PVC_EG_TAG(7)
+ #define VLAN_ATTR(x) (((x) & 0x3) << 6)
+ #define VLAN_ATTR_MASK VLAN_ATTR(3)
+
++enum mt7530_vlan_port_eg_tag {
++ MT7530_VLAN_EG_DISABLED = 0,
++ MT7530_VLAN_EG_CONSISTENT = 1,
++};
++
+ enum mt7530_vlan_port_attr {
+ MT7530_VLAN_USER = 0,
+ MT7530_VLAN_TRANSPARENT = 3,
+@@ -268,7 +275,6 @@ enum mt7530_vlan_port_attr {
+
+ /* Registers for TRGMII on the both side */
+ #define MT7530_TRGMII_RCK_CTRL 0x7a00
+-#define GSW_TRGMII_RCK_CTRL 0x300
+ #define RX_RST BIT(31)
+ #define RXC_DQSISEL BIT(30)
+ #define DQSI1_TAP_MASK (0x7f << 8)
+@@ -277,31 +283,24 @@ enum mt7530_vlan_port_attr {
+ #define DQSI0_TAP(x) ((x) & 0x7f)
+
+ #define MT7530_TRGMII_RCK_RTT 0x7a04
+-#define GSW_TRGMII_RCK_RTT 0x304
+ #define DQS1_GATE BIT(31)
+ #define DQS0_GATE BIT(30)
+
+ #define MT7530_TRGMII_RD(x) (0x7a10 + (x) * 8)
+-#define GSW_TRGMII_RD(x) (0x310 + (x) * 8)
+ #define BSLIP_EN BIT(31)
+ #define EDGE_CHK BIT(30)
+ #define RD_TAP_MASK 0x7f
+ #define RD_TAP(x) ((x) & 0x7f)
+
+-#define GSW_TRGMII_TXCTRL 0x340
+ #define MT7530_TRGMII_TXCTRL 0x7a40
+ #define TRAIN_TXEN BIT(31)
+ #define TXC_INV BIT(30)
+ #define TX_RST BIT(28)
+
+ #define MT7530_TRGMII_TD_ODT(i) (0x7a54 + 8 * (i))
+-#define GSW_TRGMII_TD_ODT(i) (0x354 + 8 * (i))
+ #define TD_DM_DRVP(x) ((x) & 0xf)
+ #define TD_DM_DRVN(x) (((x) & 0xf) << 4)
+
+-#define GSW_INTF_MODE 0x390
+-#define INTF_MODE_TRGMII BIT(1)
+-
+ #define MT7530_TRGMII_TCK_CTRL 0x7a78
+ #define TCK_TAP(x) (((x) & 0xf) << 8)
+
+@@ -434,7 +433,6 @@ static const char *p5_intf_modes(unsigned int p5_interface)
+ * @ds: The pointer to the dsa core structure
+ * @bus: The bus used for the device and built-in PHY
+ * @rstc: The pointer to reset control used by MCM
+- * @ethernet: The regmap used for access TRGMII-based registers
+ * @core_pwr: The power supplied into the core
+ * @io_pwr: The power supplied into the I/O
+ * @reset: The descriptor for GPIO line tied to its reset pin
+@@ -451,7 +449,6 @@ struct mt7530_priv {
+ struct dsa_switch *ds;
+ struct mii_bus *bus;
+ struct reset_control *rstc;
+- struct regmap *ethernet;
+ struct regulator *core_pwr;
+ struct regulator *io_pwr;
+ struct gpio_desc *reset;
+diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
+index b7f92464815d..07ba3eda3c6e 100644
+--- a/drivers/net/dsa/ocelot/felix.c
++++ b/drivers/net/dsa/ocelot/felix.c
+@@ -46,11 +46,8 @@ static int felix_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+ {
+ struct ocelot *ocelot = ds->priv;
+- bool vlan_aware;
+
+- vlan_aware = dsa_port_is_vlan_filtering(dsa_to_port(ds, port));
+-
+- return ocelot_fdb_add(ocelot, port, addr, vid, vlan_aware);
++ return ocelot_fdb_add(ocelot, port, addr, vid);
+ }
+
+ static int felix_fdb_del(struct dsa_switch *ds, int port,
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 98f8f2033154..3bd20f765120 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -514,7 +514,7 @@ static void xgbe_isr_task(unsigned long data)
+ xgbe_disable_rx_tx_ints(pdata);
+
+ /* Turn on polling */
+- __napi_schedule_irqoff(&pdata->napi);
++ __napi_schedule(&pdata->napi);
+ }
+ } else {
+ /* Don't clear Rx/Tx status if doing per channel DMA
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index 527ad2aadcca..ec035bef98b8 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -65,6 +65,17 @@ u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
+ return __raw_readl(eth->base + reg);
+ }
+
++u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
++{
++ u32 val;
++
++ val = mtk_r32(eth, reg);
++ val &= ~mask;
++ val |= set;
++ mtk_w32(eth, val, reg);
++ return reg;
++}
++
+ static int mtk_mdio_busy_wait(struct mtk_eth *eth)
+ {
+ unsigned long t_start = jiffies;
+@@ -193,7 +204,7 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ struct mtk_eth *eth = mac->hw;
+- u32 mcr_cur, mcr_new, sid;
++ u32 mcr_cur, mcr_new, sid, i;
+ int val, ge_mode, err;
+
+ /* MT76x8 has no hardware settings between for the MAC */
+@@ -255,6 +266,17 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
+ PHY_INTERFACE_MODE_TRGMII)
+ mtk_gmac0_rgmii_adjust(mac->hw,
+ state->speed);
++
++ /* mt7623_pad_clk_setup */
++ for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
++ mtk_w32(mac->hw,
++ TD_DM_DRVP(8) | TD_DM_DRVN(8),
++ TRGMII_TD_ODT(i));
++
++ /* Assert/release MT7623 RXC reset */
++ mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
++ TRGMII_RCK_CTRL);
++ mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
+ }
+ }
+
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+index 85830fe14a1b..454cfcd465fd 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -352,10 +352,13 @@
+ #define DQSI0(x) ((x << 0) & GENMASK(6, 0))
+ #define DQSI1(x) ((x << 8) & GENMASK(14, 8))
+ #define RXCTL_DMWTLAT(x) ((x << 16) & GENMASK(18, 16))
++#define RXC_RST BIT(31)
+ #define RXC_DQSISEL BIT(30)
+ #define RCK_CTRL_RGMII_1000 (RXC_DQSISEL | RXCTL_DMWTLAT(2) | DQSI1(16))
+ #define RCK_CTRL_RGMII_10_100 RXCTL_DMWTLAT(2)
+
++#define NUM_TRGMII_CTRL 5
++
+ /* TRGMII RXC control register */
+ #define TRGMII_TCK_CTRL 0x10340
+ #define TXCTL_DMWTLAT(x) ((x << 16) & GENMASK(18, 16))
+@@ -363,6 +366,11 @@
+ #define TCK_CTRL_RGMII_1000 TXCTL_DMWTLAT(2)
+ #define TCK_CTRL_RGMII_10_100 (TXC_INV | TXCTL_DMWTLAT(2))
+
++/* TRGMII TX Drive Strength */
++#define TRGMII_TD_ODT(i) (0x10354 + 8 * (i))
++#define TD_DM_DRVP(x) ((x) & 0xf)
++#define TD_DM_DRVN(x) (((x) & 0xf) << 4)
++
+ /* TRGMII Interface mode register */
+ #define INTF_MODE 0x10390
+ #define TRGMII_INTF_DIS BIT(0)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+index ac108f1e5bd6..184c3eaefbcb 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+@@ -23,7 +23,10 @@ static int mlx5_devlink_flash_update(struct devlink *devlink,
+ if (err)
+ return err;
+
+- return mlx5_firmware_flash(dev, fw, extack);
++ err = mlx5_firmware_flash(dev, fw, extack);
++ release_firmware(fw);
++
++ return err;
+ }
+
+ static u8 mlx5_fw_ver_major(u32 version)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+index 55ceabf077b2..11426f94c90c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+@@ -1035,14 +1035,15 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
+ struct mlx5e_channels *chs);
+ void mlx5e_close_channels(struct mlx5e_channels *chs);
+
+-/* Function pointer to be used to modify WH settings while
++/* Function pointer to be used to modify HW or kernel settings while
+ * switching channels
+ */
+-typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv);
++typedef int (*mlx5e_fp_preactivate)(struct mlx5e_priv *priv);
+ int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv);
+ int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
+ struct mlx5e_channels *new_chs,
+- mlx5e_fp_hw_modify hw_modify);
++ mlx5e_fp_preactivate preactivate);
++int mlx5e_num_channels_changed(struct mlx5e_priv *priv);
+ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
+ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index c6776f308d5e..304ddce6b087 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -445,9 +445,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ *cur_params = new_channels.params;
+- if (!netif_is_rxfh_configured(priv->netdev))
+- mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
+- MLX5E_INDIR_RQT_SIZE, count);
++ mlx5e_num_channels_changed(priv);
+ goto out;
+ }
+
+@@ -455,12 +453,8 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
+ if (arfs_enabled)
+ mlx5e_arfs_disable(priv);
+
+- if (!netif_is_rxfh_configured(priv->netdev))
+- mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
+- MLX5E_INDIR_RQT_SIZE, count);
+-
+ /* Switch to new channels, set new parameters and close old ones */
+- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
++ err = mlx5e_safe_switch_channels(priv, &new_channels, mlx5e_num_channels_changed);
+
+ if (arfs_enabled) {
+ int err2 = mlx5e_arfs_enable(priv);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 67fe002dfade..e56088db30df 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -2895,6 +2895,28 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
+ netdev_set_tc_queue(netdev, tc, nch, 0);
+ }
+
++static void mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
++{
++ int num_txqs = priv->channels.num * priv->channels.params.num_tc;
++ int num_rxqs = priv->channels.num * priv->profile->rq_groups;
++ struct net_device *netdev = priv->netdev;
++
++ mlx5e_netdev_set_tcs(netdev);
++ netif_set_real_num_tx_queues(netdev, num_txqs);
++ netif_set_real_num_rx_queues(netdev, num_rxqs);
++}
++
++int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
++{
++ u16 count = priv->channels.params.num_channels;
++
++ if (!netif_is_rxfh_configured(priv->netdev))
++ mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
++ MLX5E_INDIR_RQT_SIZE, count);
++
++ return 0;
++}
++
+ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
+ {
+ int i, ch;
+@@ -2916,13 +2938,7 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
+
+ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
+ {
+- int num_txqs = priv->channels.num * priv->channels.params.num_tc;
+- int num_rxqs = priv->channels.num * priv->profile->rq_groups;
+- struct net_device *netdev = priv->netdev;
+-
+- mlx5e_netdev_set_tcs(netdev);
+- netif_set_real_num_tx_queues(netdev, num_txqs);
+- netif_set_real_num_rx_queues(netdev, num_rxqs);
++ mlx5e_update_netdev_queues(priv);
+
+ mlx5e_build_txq_maps(priv);
+ mlx5e_activate_channels(&priv->channels);
+@@ -2958,7 +2974,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
+
+ static void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
+ struct mlx5e_channels *new_chs,
+- mlx5e_fp_hw_modify hw_modify)
++ mlx5e_fp_preactivate preactivate)
+ {
+ struct net_device *netdev = priv->netdev;
+ int new_num_txqs;
+@@ -2977,9 +2993,11 @@ static void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
+
+ priv->channels = *new_chs;
+
+- /* New channels are ready to roll, modify HW settings if needed */
+- if (hw_modify)
+- hw_modify(priv);
++ /* New channels are ready to roll, call the preactivate hook if needed
++ * to modify HW settings or update kernel parameters.
++ */
++ if (preactivate)
++ preactivate(priv);
+
+ priv->profile->update_rx(priv);
+ mlx5e_activate_priv_channels(priv);
+@@ -2991,7 +3009,7 @@ static void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
+
+ int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
+ struct mlx5e_channels *new_chs,
+- mlx5e_fp_hw_modify hw_modify)
++ mlx5e_fp_preactivate preactivate)
+ {
+ int err;
+
+@@ -2999,7 +3017,7 @@ int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
+ if (err)
+ return err;
+
+- mlx5e_switch_priv_channels(priv, new_chs, hw_modify);
++ mlx5e_switch_priv_channels(priv, new_chs, preactivate);
+ return 0;
+ }
+
+@@ -5301,9 +5319,10 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
+ max_nch = mlx5e_get_max_num_channels(priv->mdev);
+ if (priv->channels.params.num_channels > max_nch) {
+ mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
++ /* Reducing the number of channels - RXFH has to be reset. */
++ priv->netdev->priv_flags &= ~IFF_RXFH_CONFIGURED;
+ priv->channels.params.num_channels = max_nch;
+- mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
+- MLX5E_INDIR_RQT_SIZE, max_nch);
++ mlx5e_num_channels_changed(priv);
+ }
+
+ err = profile->init_tx(priv);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+index f175cb24bb67..f4d96344022f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -1854,29 +1854,30 @@ static int register_devlink_port(struct mlx5_core_dev *dev,
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
+ struct netdev_phys_item_id ppid = {};
+ unsigned int dl_port_index = 0;
++ u16 pfnum;
+
+ if (!is_devlink_port_supported(dev, rpriv))
+ return 0;
+
+ mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid);
++ pfnum = PCI_FUNC(dev->pdev->devfn);
+
+ if (rep->vport == MLX5_VPORT_UPLINK) {
+ devlink_port_attrs_set(&rpriv->dl_port,
+ DEVLINK_PORT_FLAVOUR_PHYSICAL,
+- PCI_FUNC(dev->pdev->devfn), false, 0,
++ pfnum, false, 0,
+ &ppid.id[0], ppid.id_len);
+ dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
+ } else if (rep->vport == MLX5_VPORT_PF) {
+ devlink_port_attrs_pci_pf_set(&rpriv->dl_port,
+ &ppid.id[0], ppid.id_len,
+- dev->pdev->devfn);
++ pfnum);
+ dl_port_index = rep->vport;
+ } else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch,
+ rpriv->rep->vport)) {
+ devlink_port_attrs_pci_vf_set(&rpriv->dl_port,
+ &ppid.id[0], ppid.id_len,
+- dev->pdev->devfn,
+- rep->vport - 1);
++ pfnum, rep->vport - 1);
+ dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index d4bad2a444e2..f5bcaf7d5d9a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -3221,12 +3221,13 @@ static int add_vlan_pop_action(struct mlx5e_priv *priv,
+ struct mlx5_esw_flow_attr *attr,
+ u32 *action)
+ {
+- int nest_level = attr->parse_attr->filter_dev->lower_level;
+ struct flow_action_entry vlan_act = {
+ .id = FLOW_ACTION_VLAN_POP,
+ };
+- int err = 0;
++ int nest_level, err = 0;
+
++ nest_level = attr->parse_attr->filter_dev->lower_level -
++ priv->netdev->lower_level;
+ while (nest_level--) {
+ err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
+ if (err)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
+index d9f4e8c59c1f..68e7ef7ca52d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
+@@ -243,7 +243,7 @@ recover_from_sw_reset:
+ if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+ break;
+
+- cond_resched();
++ msleep(20);
+ } while (!time_after(jiffies, end));
+
+ if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
+diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
+index 88c0464a54e2..9f6fe880b95f 100644
+--- a/drivers/net/ethernet/mscc/ocelot.c
++++ b/drivers/net/ethernet/mscc/ocelot.c
+@@ -183,44 +183,47 @@ static void ocelot_vlan_mode(struct ocelot *ocelot, int port,
+ ocelot_write(ocelot, val, ANA_VLANMASK);
+ }
+
+-void ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
+- bool vlan_aware)
++static int ocelot_port_set_native_vlan(struct ocelot *ocelot, int port,
++ u16 vid)
+ {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+- u32 val;
++ u32 val = 0;
+
+- if (vlan_aware)
+- val = ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+- ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1);
+- else
+- val = 0;
+- ocelot_rmw_gix(ocelot, val,
+- ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+- ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M,
+- ANA_PORT_VLAN_CFG, port);
++ if (ocelot_port->vid != vid) {
++ /* Always permit deleting the native VLAN (vid = 0) */
++ if (ocelot_port->vid && vid) {
++ dev_err(ocelot->dev,
++ "Port already has a native VLAN: %d\n",
++ ocelot_port->vid);
++ return -EBUSY;
++ }
++ ocelot_port->vid = vid;
++ }
++
++ ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_VID(vid),
++ REW_PORT_VLAN_CFG_PORT_VID_M,
++ REW_PORT_VLAN_CFG, port);
+
+- if (vlan_aware && !ocelot_port->vid)
++ if (ocelot_port->vlan_aware && !ocelot_port->vid)
+ /* If port is vlan-aware and tagged, drop untagged and priority
+ * tagged frames.
+ */
+ val = ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA |
+ ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
+ ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
+- else
+- val = 0;
+ ocelot_rmw_gix(ocelot, val,
+ ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA |
+ ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
+ ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA,
+ ANA_PORT_DROP_CFG, port);
+
+- if (vlan_aware) {
++ if (ocelot_port->vlan_aware) {
+ if (ocelot_port->vid)
+ /* Tag all frames except when VID == DEFAULT_VLAN */
+- val |= REW_TAG_CFG_TAG_CFG(1);
++ val = REW_TAG_CFG_TAG_CFG(1);
+ else
+ /* Tag all frames */
+- val |= REW_TAG_CFG_TAG_CFG(3);
++ val = REW_TAG_CFG_TAG_CFG(3);
+ } else {
+ /* Port tagging disabled. */
+ val = REW_TAG_CFG_TAG_CFG(0);
+@@ -228,31 +231,31 @@ void ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
+ ocelot_rmw_gix(ocelot, val,
+ REW_TAG_CFG_TAG_CFG_M,
+ REW_TAG_CFG, port);
++
++ return 0;
+ }
+-EXPORT_SYMBOL(ocelot_port_vlan_filtering);
+
+-static int ocelot_port_set_native_vlan(struct ocelot *ocelot, int port,
+- u16 vid)
++void ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
++ bool vlan_aware)
+ {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
++ u32 val;
+
+- if (ocelot_port->vid != vid) {
+- /* Always permit deleting the native VLAN (vid = 0) */
+- if (ocelot_port->vid && vid) {
+- dev_err(ocelot->dev,
+- "Port already has a native VLAN: %d\n",
+- ocelot_port->vid);
+- return -EBUSY;
+- }
+- ocelot_port->vid = vid;
+- }
++ ocelot_port->vlan_aware = vlan_aware;
+
+- ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_VID(vid),
+- REW_PORT_VLAN_CFG_PORT_VID_M,
+- REW_PORT_VLAN_CFG, port);
++ if (vlan_aware)
++ val = ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
++ ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1);
++ else
++ val = 0;
++ ocelot_rmw_gix(ocelot, val,
++ ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
++ ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M,
++ ANA_PORT_VLAN_CFG, port);
+
+- return 0;
++ ocelot_port_set_native_vlan(ocelot, port, ocelot_port->vid);
+ }
++EXPORT_SYMBOL(ocelot_port_vlan_filtering);
+
+ /* Default vlan to clasify for untagged frames (may be zero) */
+ static void ocelot_port_set_pvid(struct ocelot *ocelot, int port, u16 pvid)
+@@ -857,12 +860,12 @@ static void ocelot_get_stats64(struct net_device *dev,
+ }
+
+ int ocelot_fdb_add(struct ocelot *ocelot, int port,
+- const unsigned char *addr, u16 vid, bool vlan_aware)
++ const unsigned char *addr, u16 vid)
+ {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ if (!vid) {
+- if (!vlan_aware)
++ if (!ocelot_port->vlan_aware)
+ /* If the bridge is not VLAN aware and no VID was
+ * provided, set it to pvid to ensure the MAC entry
+ * matches incoming untagged packets
+@@ -889,7 +892,7 @@ static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+- return ocelot_fdb_add(ocelot, port, addr, vid, priv->vlan_aware);
++ return ocelot_fdb_add(ocelot, port, addr, vid);
+ }
+
+ int ocelot_fdb_del(struct ocelot *ocelot, int port,
+@@ -1488,8 +1491,8 @@ static int ocelot_port_attr_set(struct net_device *dev,
+ ocelot_port_attr_ageing_set(ocelot, port, attr->u.ageing_time);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+- priv->vlan_aware = attr->u.vlan_filtering;
+- ocelot_port_vlan_filtering(ocelot, port, priv->vlan_aware);
++ ocelot_port_vlan_filtering(ocelot, port,
++ attr->u.vlan_filtering);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
+ ocelot_port_attr_mc_set(ocelot, port, !attr->u.mc_disabled);
+@@ -1860,7 +1863,6 @@ static int ocelot_netdevice_port_event(struct net_device *dev,
+ } else {
+ err = ocelot_port_bridge_leave(ocelot, port,
+ info->upper_dev);
+- priv->vlan_aware = false;
+ }
+ }
+ if (netif_is_lag_master(info->upper_dev)) {
+diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
+index c259114c48fd..2ff09de34b0d 100644
+--- a/drivers/net/ethernet/mscc/ocelot.h
++++ b/drivers/net/ethernet/mscc/ocelot.h
+@@ -66,8 +66,6 @@ struct ocelot_port_private {
+ struct phy_device *phy;
+ u8 chip_port;
+
+- u8 vlan_aware;
+-
+ phy_interface_t phy_mode;
+ struct phy *serdes;
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+index 7d40760e9ba8..0e1ca2cba3c7 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+@@ -150,6 +150,8 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
+ plat_dat->init = sun7i_gmac_init;
+ plat_dat->exit = sun7i_gmac_exit;
+ plat_dat->fix_mac_speed = sun7i_fix_speed;
++ plat_dat->tx_fifo_size = 4096;
++ plat_dat->rx_fifo_size = 16384;
+
+ ret = sun7i_gmac_init(pdev, plat_dat->bsp_priv);
+ if (ret)
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+index e9bf54a579df..f0b9c43f6e31 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+@@ -576,8 +576,13 @@ static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
+ value |= XGMAC_VLAN_EDVLP;
+ value |= XGMAC_VLAN_ESVL;
+ value |= XGMAC_VLAN_DOVLTC;
++ } else {
++ value &= ~XGMAC_VLAN_EDVLP;
++ value &= ~XGMAC_VLAN_ESVL;
++ value &= ~XGMAC_VLAN_DOVLTC;
+ }
+
++ value &= ~XGMAC_VLAN_VID;
+ writel(value, ioaddr + XGMAC_VLAN_TAG);
+ } else if (perfect_match) {
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+@@ -588,13 +593,19 @@ static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
+
+ value = readl(ioaddr + XGMAC_VLAN_TAG);
+
++ value &= ~XGMAC_VLAN_VTHM;
+ value |= XGMAC_VLAN_ETV;
+ if (is_double) {
+ value |= XGMAC_VLAN_EDVLP;
+ value |= XGMAC_VLAN_ESVL;
+ value |= XGMAC_VLAN_DOVLTC;
++ } else {
++ value &= ~XGMAC_VLAN_EDVLP;
++ value &= ~XGMAC_VLAN_ESVL;
++ value &= ~XGMAC_VLAN_DOVLTC;
+ }
+
++ value &= ~XGMAC_VLAN_VID;
+ writel(value | perfect_match, ioaddr + XGMAC_VLAN_TAG);
+ } else {
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index 51b64f087717..663c68ed6ef9 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -1154,7 +1154,7 @@ static struct phy_driver ksphy_driver[] = {
+ .driver_data = &ksz9021_type,
+ .probe = kszphy_probe,
+ .config_init = ksz9131_config_init,
+- .read_status = ksz9031_read_status,
++ .read_status = genphy_read_status,
+ .ack_interrupt = kszphy_ack_interrupt,
+ .config_intr = kszphy_config_intr,
+ .get_sset_count = kszphy_get_sset_count,
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 6d3317d868d2..7874046a57dd 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1925,6 +1925,7 @@ drop:
+
+ skb_reset_network_header(skb);
+ skb_probe_transport_header(skb);
++ skb_record_rx_queue(skb, tfile->queue_index);
+
+ if (skb_xdp) {
+ struct bpf_prog *xdp_prog;
+@@ -2498,6 +2499,7 @@ build:
+ skb->protocol = eth_type_trans(skb, tun->dev);
+ skb_reset_network_header(skb);
+ skb_probe_transport_header(skb);
++ skb_record_rx_queue(skb, tfile->queue_index);
+
+ if (skb_xdp) {
+ err = do_xdp_generic(xdp_prog, skb);
+@@ -2509,7 +2511,6 @@ build:
+ !tfile->detached)
+ rxhash = __skb_get_hash_symmetric(skb);
+
+- skb_record_rx_queue(skb, tfile->queue_index);
+ netif_receive_skb(skb);
+
+ /* No need for get_cpu_ptr() here since this function is
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index 03738107fd10..151752c00727 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -3600,9 +3600,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
+ }
+
+ if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
+- hwname = kasprintf(GFP_KERNEL, "%.*s",
+- nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
+- (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
++ hwname = kstrndup((char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]),
++ nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
++ GFP_KERNEL);
+ if (!hwname)
+ return -ENOMEM;
+ param.hwname = hwname;
+@@ -3622,9 +3622,9 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
+ if (info->attrs[HWSIM_ATTR_RADIO_ID]) {
+ idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]);
+ } else if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
+- hwname = kasprintf(GFP_KERNEL, "%.*s",
+- nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
+- (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
++ hwname = kstrndup((char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]),
++ nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
++ GFP_KERNEL);
+ if (!hwname)
+ return -ENOMEM;
+ } else
+diff --git a/drivers/platform/chrome/cros_ec_rpmsg.c b/drivers/platform/chrome/cros_ec_rpmsg.c
+index bd068afe43b5..1b38bc8e164c 100644
+--- a/drivers/platform/chrome/cros_ec_rpmsg.c
++++ b/drivers/platform/chrome/cros_ec_rpmsg.c
+@@ -42,6 +42,8 @@ struct cros_ec_rpmsg {
+ struct completion xfer_ack;
+ struct work_struct host_event_work;
+ struct rpmsg_endpoint *ept;
++ bool has_pending_host_event;
++ bool probe_done;
+ };
+
+ /**
+@@ -175,7 +177,14 @@ static int cros_ec_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
+ memcpy(ec_dev->din, resp->data, len);
+ complete(&ec_rpmsg->xfer_ack);
+ } else if (resp->type == HOST_EVENT_MARK) {
+- schedule_work(&ec_rpmsg->host_event_work);
++ /*
++ * If the host event is sent before cros_ec_register is
++ * finished, queue the host event.
++ */
++ if (ec_rpmsg->probe_done)
++ schedule_work(&ec_rpmsg->host_event_work);
++ else
++ ec_rpmsg->has_pending_host_event = true;
+ } else {
+ dev_warn(ec_dev->dev, "rpmsg received invalid type = %d",
+ resp->type);
+@@ -238,6 +247,11 @@ static int cros_ec_rpmsg_probe(struct rpmsg_device *rpdev)
+ return ret;
+ }
+
++ ec_rpmsg->probe_done = true;
++
++ if (ec_rpmsg->has_pending_host_event)
++ schedule_work(&ec_rpmsg->host_event_work);
++
+ return 0;
+ }
+
+diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
+index b07bdca3d510..590375be5214 100644
+--- a/drivers/pwm/pwm-pca9685.c
++++ b/drivers/pwm/pwm-pca9685.c
+@@ -20,6 +20,7 @@
+ #include <linux/slab.h>
+ #include <linux/delay.h>
+ #include <linux/pm_runtime.h>
++#include <linux/bitmap.h>
+
+ /*
+ * Because the PCA9685 has only one prescaler per chip, changing the period of
+@@ -74,6 +75,7 @@ struct pca9685 {
+ #if IS_ENABLED(CONFIG_GPIOLIB)
+ struct mutex lock;
+ struct gpio_chip gpio;
++ DECLARE_BITMAP(pwms_inuse, PCA9685_MAXCHAN + 1);
+ #endif
+ };
+
+@@ -83,51 +85,51 @@ static inline struct pca9685 *to_pca(struct pwm_chip *chip)
+ }
+
+ #if IS_ENABLED(CONFIG_GPIOLIB)
+-static int pca9685_pwm_gpio_request(struct gpio_chip *gpio, unsigned int offset)
++static bool pca9685_pwm_test_and_set_inuse(struct pca9685 *pca, int pwm_idx)
+ {
+- struct pca9685 *pca = gpiochip_get_data(gpio);
+- struct pwm_device *pwm;
++ bool is_inuse;
+
+ mutex_lock(&pca->lock);
+-
+- pwm = &pca->chip.pwms[offset];
+-
+- if (pwm->flags & (PWMF_REQUESTED | PWMF_EXPORTED)) {
+- mutex_unlock(&pca->lock);
+- return -EBUSY;
++ if (pwm_idx >= PCA9685_MAXCHAN) {
++ /*
++ * "all LEDs" channel:
++ * pretend already in use if any of the PWMs are requested
++ */
++ if (!bitmap_empty(pca->pwms_inuse, PCA9685_MAXCHAN)) {
++ is_inuse = true;
++ goto out;
++ }
++ } else {
++ /*
++ * regular channel:
++ * pretend already in use if the "all LEDs" channel is requested
++ */
++ if (test_bit(PCA9685_MAXCHAN, pca->pwms_inuse)) {
++ is_inuse = true;
++ goto out;
++ }
+ }
+-
+- pwm_set_chip_data(pwm, (void *)1);
+-
++ is_inuse = test_and_set_bit(pwm_idx, pca->pwms_inuse);
++out:
+ mutex_unlock(&pca->lock);
+- pm_runtime_get_sync(pca->chip.dev);
+- return 0;
++ return is_inuse;
+ }
+
+-static bool pca9685_pwm_is_gpio(struct pca9685 *pca, struct pwm_device *pwm)
++static void pca9685_pwm_clear_inuse(struct pca9685 *pca, int pwm_idx)
+ {
+- bool is_gpio = false;
+-
+ mutex_lock(&pca->lock);
++ clear_bit(pwm_idx, pca->pwms_inuse);
++ mutex_unlock(&pca->lock);
++}
+
+- if (pwm->hwpwm >= PCA9685_MAXCHAN) {
+- unsigned int i;
+-
+- /*
+- * Check if any of the GPIOs are requested and in that case
+- * prevent using the "all LEDs" channel.
+- */
+- for (i = 0; i < pca->gpio.ngpio; i++)
+- if (gpiochip_is_requested(&pca->gpio, i)) {
+- is_gpio = true;
+- break;
+- }
+- } else if (pwm_get_chip_data(pwm)) {
+- is_gpio = true;
+- }
++static int pca9685_pwm_gpio_request(struct gpio_chip *gpio, unsigned int offset)
++{
++ struct pca9685 *pca = gpiochip_get_data(gpio);
+
+- mutex_unlock(&pca->lock);
+- return is_gpio;
++ if (pca9685_pwm_test_and_set_inuse(pca, offset))
++ return -EBUSY;
++ pm_runtime_get_sync(pca->chip.dev);
++ return 0;
+ }
+
+ static int pca9685_pwm_gpio_get(struct gpio_chip *gpio, unsigned int offset)
+@@ -162,6 +164,7 @@ static void pca9685_pwm_gpio_free(struct gpio_chip *gpio, unsigned int offset)
+
+ pca9685_pwm_gpio_set(gpio, offset, 0);
+ pm_runtime_put(pca->chip.dev);
++ pca9685_pwm_clear_inuse(pca, offset);
+ }
+
+ static int pca9685_pwm_gpio_get_direction(struct gpio_chip *chip,
+@@ -213,12 +216,17 @@ static int pca9685_pwm_gpio_probe(struct pca9685 *pca)
+ return devm_gpiochip_add_data(dev, &pca->gpio, pca);
+ }
+ #else
+-static inline bool pca9685_pwm_is_gpio(struct pca9685 *pca,
+- struct pwm_device *pwm)
++static inline bool pca9685_pwm_test_and_set_inuse(struct pca9685 *pca,
++ int pwm_idx)
+ {
+ return false;
+ }
+
++static inline void
++pca9685_pwm_clear_inuse(struct pca9685 *pca, int pwm_idx)
++{
++}
++
+ static inline int pca9685_pwm_gpio_probe(struct pca9685 *pca)
+ {
+ return 0;
+@@ -402,7 +410,7 @@ static int pca9685_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+ {
+ struct pca9685 *pca = to_pca(chip);
+
+- if (pca9685_pwm_is_gpio(pca, pwm))
++ if (pca9685_pwm_test_and_set_inuse(pca, pwm->hwpwm))
+ return -EBUSY;
+ pm_runtime_get_sync(chip->dev);
+
+@@ -411,8 +419,11 @@ static int pca9685_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+
+ static void pca9685_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+ {
++ struct pca9685 *pca = to_pca(chip);
++
+ pca9685_pwm_disable(chip, pwm);
+ pm_runtime_put(chip->dev);
++ pca9685_pwm_clear_inuse(pca, pwm->hwpwm);
+ }
+
+ static const struct pwm_ops pca9685_pwm_ops = {
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index 5d8f6356c556..088c1a5dd1cf 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -1542,6 +1542,11 @@ start:
+ */
+ if (ufshcd_can_hibern8_during_gating(hba) &&
+ ufshcd_is_link_hibern8(hba)) {
++ if (async) {
++ rc = -EAGAIN;
++ hba->clk_gating.active_reqs--;
++ break;
++ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ flush_work(&hba->clk_gating.ungate_work);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 09e55ea0bf5d..bc00f71cf2d1 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -4301,30 +4301,37 @@ int iscsit_close_connection(
+ if (!atomic_read(&sess->session_reinstatement) &&
+ atomic_read(&sess->session_fall_back_to_erl0)) {
+ spin_unlock_bh(&sess->conn_lock);
++ complete_all(&sess->session_wait_comp);
+ iscsit_close_session(sess);
+
+ return 0;
+ } else if (atomic_read(&sess->session_logout)) {
+ pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
+ sess->session_state = TARG_SESS_STATE_FREE;
+- spin_unlock_bh(&sess->conn_lock);
+
+- if (atomic_read(&sess->sleep_on_sess_wait_comp))
+- complete(&sess->session_wait_comp);
++ if (atomic_read(&sess->session_close)) {
++ spin_unlock_bh(&sess->conn_lock);
++ complete_all(&sess->session_wait_comp);
++ iscsit_close_session(sess);
++ } else {
++ spin_unlock_bh(&sess->conn_lock);
++ }
+
+ return 0;
+ } else {
+ pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
+ sess->session_state = TARG_SESS_STATE_FAILED;
+
+- if (!atomic_read(&sess->session_continuation)) {
+- spin_unlock_bh(&sess->conn_lock);
++ if (!atomic_read(&sess->session_continuation))
+ iscsit_start_time2retain_handler(sess);
+- } else
+- spin_unlock_bh(&sess->conn_lock);
+
+- if (atomic_read(&sess->sleep_on_sess_wait_comp))
+- complete(&sess->session_wait_comp);
++ if (atomic_read(&sess->session_close)) {
++ spin_unlock_bh(&sess->conn_lock);
++ complete_all(&sess->session_wait_comp);
++ iscsit_close_session(sess);
++ } else {
++ spin_unlock_bh(&sess->conn_lock);
++ }
+
+ return 0;
+ }
+@@ -4430,9 +4437,9 @@ static void iscsit_logout_post_handler_closesession(
+ complete(&conn->conn_logout_comp);
+
+ iscsit_dec_conn_usage_count(conn);
++ atomic_set(&sess->session_close, 1);
+ iscsit_stop_session(sess, sleep, sleep);
+ iscsit_dec_session_usage_count(sess);
+- iscsit_close_session(sess);
+ }
+
+ static void iscsit_logout_post_handler_samecid(
+@@ -4567,49 +4574,6 @@ void iscsit_fail_session(struct iscsi_session *sess)
+ sess->session_state = TARG_SESS_STATE_FAILED;
+ }
+
+-int iscsit_free_session(struct iscsi_session *sess)
+-{
+- u16 conn_count = atomic_read(&sess->nconn);
+- struct iscsi_conn *conn, *conn_tmp = NULL;
+- int is_last;
+-
+- spin_lock_bh(&sess->conn_lock);
+- atomic_set(&sess->sleep_on_sess_wait_comp, 1);
+-
+- list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
+- conn_list) {
+- if (conn_count == 0)
+- break;
+-
+- if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
+- is_last = 1;
+- } else {
+- iscsit_inc_conn_usage_count(conn_tmp);
+- is_last = 0;
+- }
+- iscsit_inc_conn_usage_count(conn);
+-
+- spin_unlock_bh(&sess->conn_lock);
+- iscsit_cause_connection_reinstatement(conn, 1);
+- spin_lock_bh(&sess->conn_lock);
+-
+- iscsit_dec_conn_usage_count(conn);
+- if (is_last == 0)
+- iscsit_dec_conn_usage_count(conn_tmp);
+-
+- conn_count--;
+- }
+-
+- if (atomic_read(&sess->nconn)) {
+- spin_unlock_bh(&sess->conn_lock);
+- wait_for_completion(&sess->session_wait_comp);
+- } else
+- spin_unlock_bh(&sess->conn_lock);
+-
+- iscsit_close_session(sess);
+- return 0;
+-}
+-
+ void iscsit_stop_session(
+ struct iscsi_session *sess,
+ int session_sleep,
+@@ -4620,8 +4584,6 @@ void iscsit_stop_session(
+ int is_last;
+
+ spin_lock_bh(&sess->conn_lock);
+- if (session_sleep)
+- atomic_set(&sess->sleep_on_sess_wait_comp, 1);
+
+ if (connection_sleep) {
+ list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
+@@ -4679,12 +4641,15 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
+ spin_lock(&sess->conn_lock);
+ if (atomic_read(&sess->session_fall_back_to_erl0) ||
+ atomic_read(&sess->session_logout) ||
++ atomic_read(&sess->session_close) ||
+ (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
+ spin_unlock(&sess->conn_lock);
+ continue;
+ }
++ iscsit_inc_session_usage_count(sess);
+ atomic_set(&sess->session_reinstatement, 1);
+ atomic_set(&sess->session_fall_back_to_erl0, 1);
++ atomic_set(&sess->session_close, 1);
+ spin_unlock(&sess->conn_lock);
+
+ list_move_tail(&se_sess->sess_list, &free_list);
+@@ -4694,7 +4659,9 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
+ list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+
+- iscsit_free_session(sess);
++ list_del_init(&se_sess->sess_list);
++ iscsit_stop_session(sess, 1, 1);
++ iscsit_dec_session_usage_count(sess);
+ session_count++;
+ }
+
+diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
+index c95f56a3ce31..7409ce2a6607 100644
+--- a/drivers/target/iscsi/iscsi_target.h
++++ b/drivers/target/iscsi/iscsi_target.h
+@@ -43,7 +43,6 @@ extern int iscsi_target_rx_thread(void *);
+ extern int iscsit_close_connection(struct iscsi_conn *);
+ extern int iscsit_close_session(struct iscsi_session *);
+ extern void iscsit_fail_session(struct iscsi_session *);
+-extern int iscsit_free_session(struct iscsi_session *);
+ extern void iscsit_stop_session(struct iscsi_session *, int, int);
+ extern int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *, int);
+
+diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
+index 42b369fc415e..0fa1d57b26fa 100644
+--- a/drivers/target/iscsi/iscsi_target_configfs.c
++++ b/drivers/target/iscsi/iscsi_target_configfs.c
+@@ -1476,20 +1476,23 @@ static void lio_tpg_close_session(struct se_session *se_sess)
+ spin_lock(&sess->conn_lock);
+ if (atomic_read(&sess->session_fall_back_to_erl0) ||
+ atomic_read(&sess->session_logout) ||
++ atomic_read(&sess->session_close) ||
+ (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
+ spin_unlock(&sess->conn_lock);
+ spin_unlock_bh(&se_tpg->session_lock);
+ return;
+ }
++ iscsit_inc_session_usage_count(sess);
+ atomic_set(&sess->session_reinstatement, 1);
+ atomic_set(&sess->session_fall_back_to_erl0, 1);
++ atomic_set(&sess->session_close, 1);
+ spin_unlock(&sess->conn_lock);
+
+ iscsit_stop_time2retain_timer(sess);
+ spin_unlock_bh(&se_tpg->session_lock);
+
+ iscsit_stop_session(sess, 1, 1);
+- iscsit_close_session(sess);
++ iscsit_dec_session_usage_count(sess);
+ }
+
+ static u32 lio_tpg_get_inst_index(struct se_portal_group *se_tpg)
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index f53330813207..731ee67fe914 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -156,6 +156,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
+ spin_lock(&sess_p->conn_lock);
+ if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
+ atomic_read(&sess_p->session_logout) ||
++ atomic_read(&sess_p->session_close) ||
+ (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
+ spin_unlock(&sess_p->conn_lock);
+ continue;
+@@ -166,6 +167,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
+ (sess_p->sess_ops->SessionType == sessiontype))) {
+ atomic_set(&sess_p->session_reinstatement, 1);
+ atomic_set(&sess_p->session_fall_back_to_erl0, 1);
++ atomic_set(&sess_p->session_close, 1);
+ spin_unlock(&sess_p->conn_lock);
+ iscsit_inc_session_usage_count(sess_p);
+ iscsit_stop_time2retain_timer(sess_p);
+@@ -190,7 +192,6 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
+ if (sess->session_state == TARG_SESS_STATE_FAILED) {
+ spin_unlock_bh(&sess->conn_lock);
+ iscsit_dec_session_usage_count(sess);
+- iscsit_close_session(sess);
+ return 0;
+ }
+ spin_unlock_bh(&sess->conn_lock);
+@@ -198,7 +199,6 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
+ iscsit_stop_session(sess, 1, 1);
+ iscsit_dec_session_usage_count(sess);
+
+- iscsit_close_session(sess);
+ return 0;
+ }
+
+@@ -486,6 +486,7 @@ static int iscsi_login_non_zero_tsih_s2(
+ sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
+ atomic_read(&sess_p->session_logout) ||
++ atomic_read(&sess_p->session_close) ||
+ (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED))
+ continue;
+ if (!memcmp(sess_p->isid, pdu->isid, 6) &&
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 09d6b11246c9..384f26322609 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -2567,10 +2567,8 @@ static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
+
+ dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);
+
+- if (stop) {
++ if (stop)
+ dwc3_stop_active_transfer(dep, true, true);
+- dep->flags = DWC3_EP_ENABLED;
+- }
+
+ /*
+ * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index e2803094aac8..03fa4b104f52 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -561,8 +561,8 @@ static int should_ignore_root(struct btrfs_root *root)
+ if (!reloc_root)
+ return 0;
+
+- if (btrfs_root_last_snapshot(&reloc_root->root_item) ==
+- root->fs_info->running_transaction->transid - 1)
++ if (btrfs_header_generation(reloc_root->commit_root) ==
++ root->fs_info->running_transaction->transid)
+ return 0;
+ /*
+ * if there is reloc tree and it was created in previous
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 0e8708b77da6..44187caa00c9 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3570,8 +3570,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
+ (unsigned long long)map->m_lblk, map_len);
+
+ sbi = EXT4_SB(inode->i_sb);
+- eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
+- inode->i_sb->s_blocksize_bits;
++ eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
++ >> inode->i_sb->s_blocksize_bits;
+ if (eof_block < map->m_lblk + map_len)
+ eof_block = map->m_lblk + map_len;
+
+@@ -3826,8 +3826,8 @@ static int ext4_split_convert_extents(handle_t *handle,
+ __func__, inode->i_ino,
+ (unsigned long long)map->m_lblk, map->m_len);
+
+- eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
+- inode->i_sb->s_blocksize_bits;
++ eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
++ >> inode->i_sb->s_blocksize_bits;
+ if (eof_block < map->m_lblk + map->m_len)
+ eof_block = map->m_lblk + map->m_len;
+ /*
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 71e2b80ff4aa..d51b8ae33ad9 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -4112,7 +4112,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
+ sbi->s_inodes_per_group > blocksize * 8) {
+ ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
+- sbi->s_blocks_per_group);
++ sbi->s_inodes_per_group);
+ goto failed_mount;
+ }
+ sbi->s_itb_per_group = sbi->s_inodes_per_group /
+@@ -4241,9 +4241,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ EXT4_BLOCKS_PER_GROUP(sb) - 1);
+ do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
+ if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
+- ext4_msg(sb, KERN_WARNING, "groups count too large: %u "
++ ext4_msg(sb, KERN_WARNING, "groups count too large: %llu "
+ "(block count %llu, first data block %u, "
+- "blocks per group %lu)", sbi->s_groups_count,
++ "blocks per group %lu)", blocks_count,
+ ext4_blocks_count(es),
+ le32_to_cpu(es->s_first_data_block),
+ EXT4_BLOCKS_PER_GROUP(sb));
+diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
+index 27373f5792a4..e855d8260433 100644
+--- a/fs/jbd2/commit.c
++++ b/fs/jbd2/commit.c
+@@ -997,9 +997,10 @@ restart_loop:
+ * journalled data) we need to unmap buffer and clear
+ * more bits. We also need to be careful about the check
+ * because the data page mapping can get cleared under
+- * out hands, which alse need not to clear more bits
+- * because the page and buffers will be freed and can
+- * never be reused once we are done with them.
++ * our hands. Note that if mapping == NULL, we don't
++ * need to make buffer unmapped because the page is
++ * already detached from the mapping and buffers cannot
++ * get reused.
+ */
+ mapping = READ_ONCE(bh->b_page->mapping);
+ if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) {
+diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
+index b045cf1826fc..bb980721502d 100644
+--- a/fs/overlayfs/inode.c
++++ b/fs/overlayfs/inode.c
+@@ -881,7 +881,7 @@ struct inode *ovl_get_inode(struct super_block *sb,
+ struct dentry *lowerdentry = lowerpath ? lowerpath->dentry : NULL;
+ bool bylower = ovl_hash_bylower(sb, upperdentry, lowerdentry,
+ oip->index);
+- int fsid = bylower ? oip->lowerpath->layer->fsid : 0;
++ int fsid = bylower ? lowerpath->layer->fsid : 0;
+ bool is_dir, metacopy = false;
+ unsigned long ino = 0;
+ int err = oip->newinode ? -EEXIST : -ENOMEM;
+@@ -931,6 +931,8 @@ struct inode *ovl_get_inode(struct super_block *sb,
+ err = -ENOMEM;
+ goto out_err;
+ }
++ ino = realinode->i_ino;
++ fsid = lowerpath->layer->fsid;
+ }
+ ovl_fill_inode(inode, realinode->i_mode, realinode->i_rdev, ino, fsid);
+ ovl_inode_init(inode, upperdentry, lowerdentry, oip->lowerdata);
+diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
+index b69c16cbbf71..2d0d91070268 100644
+--- a/include/net/ip6_route.h
++++ b/include/net/ip6_route.h
+@@ -254,6 +254,7 @@ static inline bool ipv6_anycast_destination(const struct dst_entry *dst,
+
+ return rt->rt6i_flags & RTF_ANYCAST ||
+ (rt->rt6i_dst.plen < 127 &&
++ !(rt->rt6i_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) &&
+ ipv6_addr_equal(&rt->rt6i_dst.addr, daddr));
+ }
+
+diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
+index 64cbbbe74a36..a699b24b08de 100644
+--- a/include/soc/mscc/ocelot.h
++++ b/include/soc/mscc/ocelot.h
+@@ -411,6 +411,8 @@ struct ocelot_port {
+
+ void __iomem *regs;
+
++ bool vlan_aware;
++
+ /* Ingress default VLAN (pvid) */
+ u16 pvid;
+
+@@ -527,7 +529,7 @@ int ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
+ int ocelot_fdb_dump(struct ocelot *ocelot, int port,
+ dsa_fdb_dump_cb_t *cb, void *data);
+ int ocelot_fdb_add(struct ocelot *ocelot, int port,
+- const unsigned char *addr, u16 vid, bool vlan_aware);
++ const unsigned char *addr, u16 vid);
+ int ocelot_fdb_del(struct ocelot *ocelot, int port,
+ const unsigned char *addr, u16 vid);
+ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
+diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
+index a49d37140a64..591cd9e4692c 100644
+--- a/include/target/iscsi/iscsi_target_core.h
++++ b/include/target/iscsi/iscsi_target_core.h
+@@ -676,7 +676,7 @@ struct iscsi_session {
+ atomic_t session_logout;
+ atomic_t session_reinstatement;
+ atomic_t session_stop_active;
+- atomic_t sleep_on_sess_wait_comp;
++ atomic_t session_close;
+ /* connection list */
+ struct list_head sess_conn_list;
+ struct list_head cr_active_list;
+diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
+index 287d77eae59b..de840de87a18 100644
+--- a/kernel/trace/trace_events_trigger.c
++++ b/kernel/trace/trace_events_trigger.c
+@@ -1088,14 +1088,10 @@ register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
+ struct event_trigger_data *data,
+ struct trace_event_file *file)
+ {
+- int ret = register_trigger(glob, ops, data, file);
+-
+- if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) {
+- unregister_trigger(glob, ops, data, file);
+- ret = 0;
+- }
++ if (tracing_alloc_snapshot_instance(file->tr) != 0)
++ return 0;
+
+- return ret;
++ return register_trigger(glob, ops, data, file);
+ }
+
+ static int
+diff --git a/net/bpfilter/main.c b/net/bpfilter/main.c
+index efea4874743e..05e1cfc1e5cd 100644
+--- a/net/bpfilter/main.c
++++ b/net/bpfilter/main.c
+@@ -35,7 +35,6 @@ static void loop(void)
+ struct mbox_reply reply;
+ int n;
+
+- fprintf(debug_f, "testing the buffer\n");
+ n = read(0, &req, sizeof(req));
+ if (n != sizeof(req)) {
+ fprintf(debug_f, "invalid request %d\n", n);
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 9d3fddbc7037..fc8459316c3d 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4090,7 +4090,8 @@ EXPORT_SYMBOL(netdev_max_backlog);
+
+ int netdev_tstamp_prequeue __read_mostly = 1;
+ int netdev_budget __read_mostly = 300;
+-unsigned int __read_mostly netdev_budget_usecs = 2000;
++/* Must be at least 2 jiffies to guarantee 1 jiffy timeout */
++unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ;
+ int weight_p __read_mostly = 64; /* old backlog weight */
+ int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */
+ int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */
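For context on the netdev_budget_usecs hunk above: the NAPI time budget is measured in jiffies, so the microsecond default has to span at least two ticks for a one-jiffy timeout to be meaningful. A quick stand-alone check of the arithmetic (plain userspace C; USEC_PER_SEC is 1000000 as in the kernel, and the HZ values are just the common configurations):

#include <stdio.h>

#define USEC_PER_SEC 1000000UL

int main(void)
{
	unsigned long hz_values[] = { 100, 250, 300, 1000 };
	unsigned long i;

	for (i = 0; i < sizeof(hz_values) / sizeof(hz_values[0]); i++) {
		unsigned long hz = hz_values[i];
		unsigned long jiffy_us = USEC_PER_SEC / hz;       /* one tick */
		unsigned long new_default = 2 * USEC_PER_SEC / hz;

		/* The old fixed 2000us default is shorter than one tick
		 * whenever HZ < 500; the new value is always exactly two
		 * ticks long.
		 */
		printf("HZ=%lu: jiffy=%luus old=2000us new=%luus\n",
		       hz, jiffy_us, new_default);
	}
	return 0;
}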
+diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
+index fae21c863b1f..55c0b2e872a5 100644
+--- a/net/hsr/hsr_netlink.c
++++ b/net/hsr/hsr_netlink.c
+@@ -61,10 +61,16 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
+ else
+ multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]);
+
+- if (!data[IFLA_HSR_VERSION])
++ if (!data[IFLA_HSR_VERSION]) {
+ hsr_version = 0;
+- else
++ } else {
+ hsr_version = nla_get_u8(data[IFLA_HSR_VERSION]);
++ if (hsr_version > 1) {
++ NL_SET_ERR_MSG_MOD(extack,
++ "Only versions 0..1 are supported");
++ return -EINVAL;
++ }
++ }
+
+ return hsr_dev_finalize(dev, link, multicast_spec, hsr_version);
+ }
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index e4632bd2026d..458dc6eb5a68 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -614,12 +614,15 @@ struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
+ return NULL;
+ }
+
+-static int ip_mc_config(struct sock *sk, bool join, const struct in_ifaddr *ifa)
++static int ip_mc_autojoin_config(struct net *net, bool join,
++ const struct in_ifaddr *ifa)
+ {
++#if defined(CONFIG_IP_MULTICAST)
+ struct ip_mreqn mreq = {
+ .imr_multiaddr.s_addr = ifa->ifa_address,
+ .imr_ifindex = ifa->ifa_dev->dev->ifindex,
+ };
++ struct sock *sk = net->ipv4.mc_autojoin_sk;
+ int ret;
+
+ ASSERT_RTNL();
+@@ -632,6 +635,9 @@ static int ip_mc_config(struct sock *sk, bool join, const struct in_ifaddr *ifa)
+ release_sock(sk);
+
+ return ret;
++#else
++ return -EOPNOTSUPP;
++#endif
+ }
+
+ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
+@@ -675,7 +681,7 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
+ continue;
+
+ if (ipv4_is_multicast(ifa->ifa_address))
+- ip_mc_config(net->ipv4.mc_autojoin_sk, false, ifa);
++ ip_mc_autojoin_config(net, false, ifa);
+ __inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
+ return 0;
+ }
+@@ -940,8 +946,7 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
+ */
+ set_ifa_lifetime(ifa, valid_lft, prefered_lft);
+ if (ifa->ifa_flags & IFA_F_MCAUTOJOIN) {
+- int ret = ip_mc_config(net->ipv4.mc_autojoin_sk,
+- true, ifa);
++ int ret = ip_mc_autojoin_config(net, true, ifa);
+
+ if (ret < 0) {
+ inet_free_ifa(ifa);
+diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
+index ef408a5090a2..c9504ec6a8d8 100644
+--- a/net/ipv6/icmp.c
++++ b/net/ipv6/icmp.c
+@@ -229,6 +229,25 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
+ return res;
+ }
+
++static bool icmpv6_rt_has_prefsrc(struct sock *sk, u8 type,
++ struct flowi6 *fl6)
++{
++ struct net *net = sock_net(sk);
++ struct dst_entry *dst;
++ bool res = false;
++
++ dst = ip6_route_output(net, sk, fl6);
++ if (!dst->error) {
++ struct rt6_info *rt = (struct rt6_info *)dst;
++ struct in6_addr prefsrc;
++
++ rt6_get_prefsrc(rt, &prefsrc);
++ res = !ipv6_addr_any(&prefsrc);
++ }
++ dst_release(dst);
++ return res;
++}
++
+ /*
+ * an inline helper for the "simple" if statement below
+ * checks if parameter problem report is caused by an
+@@ -527,7 +546,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ saddr = force_saddr;
+ if (saddr) {
+ fl6.saddr = *saddr;
+- } else {
++ } else if (!icmpv6_rt_has_prefsrc(sk, type, &fl6)) {
+ /* select a more meaningful saddr from input if */
+ struct net_device *in_netdev;
+
+diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
+index f5a9bdc4980c..ebb381c3f1b9 100644
+--- a/net/l2tp/l2tp_netlink.c
++++ b/net/l2tp/l2tp_netlink.c
+@@ -920,51 +920,51 @@ static const struct genl_ops l2tp_nl_ops[] = {
+ .cmd = L2TP_CMD_TUNNEL_CREATE,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = l2tp_nl_cmd_tunnel_create,
+- .flags = GENL_ADMIN_PERM,
++ .flags = GENL_UNS_ADMIN_PERM,
+ },
+ {
+ .cmd = L2TP_CMD_TUNNEL_DELETE,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = l2tp_nl_cmd_tunnel_delete,
+- .flags = GENL_ADMIN_PERM,
++ .flags = GENL_UNS_ADMIN_PERM,
+ },
+ {
+ .cmd = L2TP_CMD_TUNNEL_MODIFY,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = l2tp_nl_cmd_tunnel_modify,
+- .flags = GENL_ADMIN_PERM,
++ .flags = GENL_UNS_ADMIN_PERM,
+ },
+ {
+ .cmd = L2TP_CMD_TUNNEL_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = l2tp_nl_cmd_tunnel_get,
+ .dumpit = l2tp_nl_cmd_tunnel_dump,
+- .flags = GENL_ADMIN_PERM,
++ .flags = GENL_UNS_ADMIN_PERM,
+ },
+ {
+ .cmd = L2TP_CMD_SESSION_CREATE,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = l2tp_nl_cmd_session_create,
+- .flags = GENL_ADMIN_PERM,
++ .flags = GENL_UNS_ADMIN_PERM,
+ },
+ {
+ .cmd = L2TP_CMD_SESSION_DELETE,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = l2tp_nl_cmd_session_delete,
+- .flags = GENL_ADMIN_PERM,
++ .flags = GENL_UNS_ADMIN_PERM,
+ },
+ {
+ .cmd = L2TP_CMD_SESSION_MODIFY,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = l2tp_nl_cmd_session_modify,
+- .flags = GENL_ADMIN_PERM,
++ .flags = GENL_UNS_ADMIN_PERM,
+ },
+ {
+ .cmd = L2TP_CMD_SESSION_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = l2tp_nl_cmd_session_get,
+ .dumpit = l2tp_nl_cmd_session_dump,
+- .flags = GENL_ADMIN_PERM,
++ .flags = GENL_UNS_ADMIN_PERM,
+ },
+ };
+
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index 4c2b5ba3ac09..a14aef11ffb8 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -1051,7 +1051,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
+ if (hw->max_signal <= 0) {
+ result = -EINVAL;
+- goto fail_wiphy_register;
++ goto fail_workqueue;
+ }
+ }
+
+@@ -1113,7 +1113,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+
+ result = ieee80211_init_cipher_suites(local);
+ if (result < 0)
+- goto fail_wiphy_register;
++ goto fail_workqueue;
+
+ if (!local->ops->remain_on_channel)
+ local->hw.wiphy->max_remain_on_channel_duration = 5000;
+@@ -1139,10 +1139,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+
+ local->hw.wiphy->max_num_csa_counters = IEEE80211_MAX_CSA_COUNTERS_NUM;
+
+- result = wiphy_register(local->hw.wiphy);
+- if (result < 0)
+- goto fail_wiphy_register;
+-
+ /*
+ * We use the number of queues for feature tests (QoS, HT) internally
+ * so restrict them appropriately.
+@@ -1198,9 +1194,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ goto fail_flows;
+
+ rtnl_lock();
+-
+ result = ieee80211_init_rate_ctrl_alg(local,
+ hw->rate_control_algorithm);
++ rtnl_unlock();
+ if (result < 0) {
+ wiphy_debug(local->hw.wiphy,
+ "Failed to initialize rate control algorithm\n");
+@@ -1254,6 +1250,12 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ local->sband_allocated |= BIT(band);
+ }
+
++ result = wiphy_register(local->hw.wiphy);
++ if (result < 0)
++ goto fail_wiphy_register;
++
++ rtnl_lock();
++
+ /* add one default STA interface if supported */
+ if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION) &&
+ !ieee80211_hw_check(hw, NO_AUTO_VIF)) {
+@@ -1293,17 +1295,17 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ #if defined(CONFIG_INET) || defined(CONFIG_IPV6)
+ fail_ifa:
+ #endif
++ wiphy_unregister(local->hw.wiphy);
++ fail_wiphy_register:
+ rtnl_lock();
+ rate_control_deinitialize(local);
+ ieee80211_remove_interfaces(local);
+- fail_rate:
+ rtnl_unlock();
++ fail_rate:
+ fail_flows:
+ ieee80211_led_exit(local);
+ destroy_workqueue(local->workqueue);
+ fail_workqueue:
+- wiphy_unregister(local->hw.wiphy);
+- fail_wiphy_register:
+ if (local->wiphy_ciphers_allocated)
+ kfree(local->hw.wiphy->cipher_suites);
+ kfree(local->int_scan_req);
+@@ -1353,8 +1355,8 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
+ skb_queue_purge(&local->skb_queue_unreliable);
+ skb_queue_purge(&local->skb_queue_tdls_chsw);
+
+- destroy_workqueue(local->workqueue);
+ wiphy_unregister(local->hw.wiphy);
++ destroy_workqueue(local->workqueue);
+ ieee80211_led_exit(local);
+ kfree(local->int_scan_req);
+ }
+diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
+index 3d24d45be5f4..930f48a20546 100644
+--- a/net/qrtr/qrtr.c
++++ b/net/qrtr/qrtr.c
+@@ -763,20 +763,21 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+
+ node = NULL;
+ if (addr->sq_node == QRTR_NODE_BCAST) {
+- enqueue_fn = qrtr_bcast_enqueue;
+- if (addr->sq_port != QRTR_PORT_CTRL) {
++ if (addr->sq_port != QRTR_PORT_CTRL &&
++ qrtr_local_nid != QRTR_NODE_BCAST) {
+ release_sock(sk);
+ return -ENOTCONN;
+ }
++ enqueue_fn = qrtr_bcast_enqueue;
+ } else if (addr->sq_node == ipc->us.sq_node) {
+ enqueue_fn = qrtr_local_enqueue;
+ } else {
+- enqueue_fn = qrtr_node_enqueue;
+ node = qrtr_node_lookup(addr->sq_node);
+ if (!node) {
+ release_sock(sk);
+ return -ECONNRESET;
+ }
++ enqueue_fn = qrtr_node_enqueue;
+ }
+
+ plen = (len + 3) & ~3;
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index fe7aa8637c29..347f11cdd83a 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -618,10 +618,8 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
+ [NL80211_ATTR_HE_CAPABILITY] = { .type = NLA_BINARY,
+ .len = NL80211_HE_MAX_CAPABILITY_LEN },
+
+- [NL80211_ATTR_FTM_RESPONDER] = {
+- .type = NLA_NESTED,
+- .validation_data = nl80211_ftm_responder_policy,
+- },
++ [NL80211_ATTR_FTM_RESPONDER] =
++ NLA_POLICY_NESTED(nl80211_ftm_responder_policy),
+ [NL80211_ATTR_TIMEOUT] = NLA_POLICY_MIN(NLA_U32, 1),
+ [NL80211_ATTR_PEER_MEASUREMENTS] =
+ NLA_POLICY_NESTED(nl80211_pmsr_attr_policy),
+diff --git a/security/keys/proc.c b/security/keys/proc.c
+index 415f3f1c2da0..d0cde6685627 100644
+--- a/security/keys/proc.c
++++ b/security/keys/proc.c
+@@ -139,6 +139,8 @@ static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos)
+ n = key_serial_next(p, v);
+ if (n)
+ *_pos = key_node_serial(n);
++ else
++ (*_pos)++;
+ return n;
+ }
+
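The security/keys/proc.c hunk above follows the usual seq_file rule that a ->next() callback must advance *pos even when it has run out of items; otherwise the read path can repeat the final record at end-of-file. A minimal sketch of that shape, assuming a toy fixed array rather than the keyring data structures:

#include <linux/kernel.h>
#include <linux/seq_file.h>

/* Illustrative only: iterate a fixed array of values. */
static const int demo_items[] = { 10, 20, 30 };

static void *demo_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;				/* always advance, even past the end */
	if (*pos >= ARRAY_SIZE(demo_items))
		return NULL;			/* end of data, but *pos already moved */
	return (void *)&demo_items[*pos];
}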
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 3bb9821f1a48..1ef5c3be5686 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -7253,6 +7253,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
+ SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
++ SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE),
+ SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
+ SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
+diff --git a/sound/soc/intel/atom/sst-atom-controls.c b/sound/soc/intel/atom/sst-atom-controls.c
+index baef461a99f1..f883c9340eee 100644
+--- a/sound/soc/intel/atom/sst-atom-controls.c
++++ b/sound/soc/intel/atom/sst-atom-controls.c
+@@ -1333,7 +1333,7 @@ int sst_send_pipe_gains(struct snd_soc_dai *dai, int stream, int mute)
+ dai->capture_widget->name);
+ w = dai->capture_widget;
+ snd_soc_dapm_widget_for_each_source_path(w, p) {
+- if (p->connected && !p->connected(w, p->sink))
++ if (p->connected && !p->connected(w, p->source))
+ continue;
+
+ if (p->connect && p->source->power &&
+diff --git a/sound/soc/intel/atom/sst/sst_pci.c b/sound/soc/intel/atom/sst/sst_pci.c
+index d952719bc098..5862fe968083 100644
+--- a/sound/soc/intel/atom/sst/sst_pci.c
++++ b/sound/soc/intel/atom/sst/sst_pci.c
+@@ -99,7 +99,7 @@ static int sst_platform_get_resources(struct intel_sst_drv *ctx)
+ dev_dbg(ctx->dev, "DRAM Ptr %p\n", ctx->dram);
+ do_release_regions:
+ pci_release_regions(pci);
+- return 0;
++ return ret;
+ }
+
+ /*
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index d2a050bb8341..5661994e13e7 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -1446,7 +1446,7 @@ error:
+ usb_audio_err(chip,
+ "cannot get connectors status: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n",
+ UAC_GET_CUR, validx, idx, cval->val_type);
+- return ret;
++ return filter_error(cval, ret);
+ }
+
+ ucontrol->value.integer.value[0] = val;
+@@ -1750,11 +1750,15 @@ static void get_connector_control_name(struct usb_mixer_interface *mixer,
+
+ /* Build a mixer control for a UAC connector control (jack-detect) */
+ static void build_connector_control(struct usb_mixer_interface *mixer,
++ const struct usbmix_name_map *imap,
+ struct usb_audio_term *term, bool is_input)
+ {
+ struct snd_kcontrol *kctl;
+ struct usb_mixer_elem_info *cval;
+
++ if (check_ignored_ctl(find_map(imap, term->id, 0)))
++ return;
++
+ cval = kzalloc(sizeof(*cval), GFP_KERNEL);
+ if (!cval)
+ return;
+@@ -2088,8 +2092,9 @@ static int parse_audio_input_terminal(struct mixer_build *state, int unitid,
+ check_input_term(state, term_id, &iterm);
+
+ /* Check for jack detection. */
+- if (uac_v2v3_control_is_readable(bmctls, control))
+- build_connector_control(state->mixer, &iterm, true);
++ if ((iterm.type & 0xff00) != 0x0100 &&
++ uac_v2v3_control_is_readable(bmctls, control))
++ build_connector_control(state->mixer, state->map, &iterm, true);
+
+ return 0;
+ }
+@@ -3050,13 +3055,13 @@ static int snd_usb_mixer_controls_badd(struct usb_mixer_interface *mixer,
+ memset(&iterm, 0, sizeof(iterm));
+ iterm.id = UAC3_BADD_IT_ID4;
+ iterm.type = UAC_BIDIR_TERMINAL_HEADSET;
+- build_connector_control(mixer, &iterm, true);
++ build_connector_control(mixer, map->map, &iterm, true);
+
+ /* Output Term - Insertion control */
+ memset(&oterm, 0, sizeof(oterm));
+ oterm.id = UAC3_BADD_OT_ID3;
+ oterm.type = UAC_BIDIR_TERMINAL_HEADSET;
+- build_connector_control(mixer, &oterm, false);
++ build_connector_control(mixer, map->map, &oterm, false);
+ }
+
+ return 0;
+@@ -3085,7 +3090,7 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
+ if (map->id == state.chip->usb_id) {
+ state.map = map->map;
+ state.selector_map = map->selector_map;
+- mixer->ignore_ctl_error = map->ignore_ctl_error;
++ mixer->ignore_ctl_error |= map->ignore_ctl_error;
+ break;
+ }
+ }
+@@ -3128,10 +3133,11 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
+ if (err < 0 && err != -EINVAL)
+ return err;
+
+- if (uac_v2v3_control_is_readable(le16_to_cpu(desc->bmControls),
++ if ((state.oterm.type & 0xff00) != 0x0100 &&
++ uac_v2v3_control_is_readable(le16_to_cpu(desc->bmControls),
+ UAC2_TE_CONNECTOR)) {
+- build_connector_control(state.mixer, &state.oterm,
+- false);
++ build_connector_control(state.mixer, state.map,
++ &state.oterm, false);
+ }
+ } else { /* UAC_VERSION_3 */
+ struct uac3_output_terminal_descriptor *desc = p;
+@@ -3153,10 +3159,11 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
+ if (err < 0 && err != -EINVAL)
+ return err;
+
+- if (uac_v2v3_control_is_readable(le32_to_cpu(desc->bmControls),
++ if ((state.oterm.type & 0xff00) != 0x0100 &&
++ uac_v2v3_control_is_readable(le32_to_cpu(desc->bmControls),
+ UAC3_TE_INSERTION)) {
+- build_connector_control(state.mixer, &state.oterm,
+- false);
++ build_connector_control(state.mixer, state.map,
++ &state.oterm, false);
+ }
+ }
+ }
+diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
+index f6a67eecb063..3a54ca04ec4c 100644
+--- a/sound/usb/mixer_maps.c
++++ b/sound/usb/mixer_maps.c
+@@ -350,9 +350,11 @@ static const struct usbmix_name_map dell_alc4020_map[] = {
+ };
+
+ /* Some mobos shipped with a dummy HD-audio show the invalid GET_MIN/GET_MAX
+- * response for Input Gain Pad (id=19, control=12). Skip it.
++ * response for Input Gain Pad (id=19, control=12) and the connector status
++ * for SPDIF terminal (id=18). Skip them.
+ */
+ static const struct usbmix_name_map asus_rog_map[] = {
++ { 18, NULL }, /* OT, connector control */
+ { 19, NULL, 12 }, /* FU, Input Gain Pad */
+ {}
+ };
+diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
+index 66cd97cc8b92..89682224ecd0 100644
+--- a/tools/perf/builtin-report.c
++++ b/tools/perf/builtin-report.c
+@@ -185,24 +185,23 @@ static int hist_iter__branch_callback(struct hist_entry_iter *iter,
+ {
+ struct hist_entry *he = iter->he;
+ struct report *rep = arg;
+- struct branch_info *bi;
++ struct branch_info *bi = he->branch_info;
+ struct perf_sample *sample = iter->sample;
+ struct evsel *evsel = iter->evsel;
+ int err;
+
++ branch_type_count(&rep->brtype_stat, &bi->flags,
++ bi->from.addr, bi->to.addr);
++
+ if (!ui__has_annotation() && !rep->symbol_ipc)
+ return 0;
+
+- bi = he->branch_info;
+ err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);
+ if (err)
+ goto out;
+
+ err = addr_map_symbol__inc_samples(&bi->to, sample, evsel);
+
+- branch_type_count(&rep->brtype_stat, &bi->flags,
+- bi->from.addr, bi->to.addr);
+-
+ out:
+ return err;
+ }