authorMike Pagano <mpagano@gentoo.org>2023-12-13 13:27:06 -0500
committerMike Pagano <mpagano@gentoo.org>2023-12-13 13:27:06 -0500
commit1b001ef904a50ea1a63ca48ab21fb6ecd6d3d362 (patch)
tree2e1fd7f813a09b5e914260198bb0aff63ecff5cd
parentLinux patch 6.1.67 (diff)
downloadlinux-patches-1b001ef904a50ea1a63ca48ab21fb6ecd6d3d362.tar.gz
linux-patches-1b001ef904a50ea1a63ca48ab21fb6ecd6d3d362.tar.bz2
linux-patches-1b001ef904a50ea1a63ca48ab21fb6ecd6d3d362.zip
Linux patch 6.1.68 (tag: 6.1-76)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--   0000_README             |    4
-rw-r--r--   1067_linux-6.1.68.patch | 9166
2 files changed, 9170 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 5f228fc4..a8206ed1 100644
--- a/0000_README
+++ b/0000_README
@@ -311,6 +311,10 @@ Patch: 1066_linux-6.1.67.patch
From: https://www.kernel.org
Desc: Linux 6.1.67
+Patch: 1067_linux-6.1.68.patch
+From: https://www.kernel.org
+Desc: Linux 6.1.68
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1067_linux-6.1.68.patch b/1067_linux-6.1.68.patch
new file mode 100644
index 00000000..78edec28
--- /dev/null
+++ b/1067_linux-6.1.68.patch
@@ -0,0 +1,9166 @@
+diff --git a/Documentation/ABI/testing/sysfs-bus-optee-devices b/Documentation/ABI/testing/sysfs-bus-optee-devices
+index 0f58701367b66..af31e5a22d89f 100644
+--- a/Documentation/ABI/testing/sysfs-bus-optee-devices
++++ b/Documentation/ABI/testing/sysfs-bus-optee-devices
+@@ -6,3 +6,12 @@ Description:
+ OP-TEE bus provides reference to registered drivers under this directory. The <uuid>
+ matches Trusted Application (TA) driver and corresponding TA in secure OS. Drivers
+ are free to create needed API under optee-ta-<uuid> directory.
++
++What: /sys/bus/tee/devices/optee-ta-<uuid>/need_supplicant
++Date: November 2023
++KernelVersion: 6.7
++Contact: op-tee@lists.trustedfirmware.org
++Description:
++ Allows to distinguish whether an OP-TEE based TA/device requires user-space
++ tee-supplicant to function properly or not. This attribute will be present for
++ devices which depend on tee-supplicant to be running.
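The need_supplicant attribute documented above gives userspace a per-device hint that the optee driver expects tee-supplicant to be running. A minimal userspace probe, sketched in C (the TA UUID in the path is hypothetical):

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* Path follows the ABI entry above; the UUID is made up. */
            const char *attr = "/sys/bus/tee/devices/"
                    "optee-ta-00112233-4455-6677-8899-aabbccddeeff/need_supplicant";

            if (access(attr, F_OK) == 0)
                    puts("TA depends on tee-supplicant");
            else
                    puts("no supplicant dependency advertised (or no such TA)");
            return 0;
    }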
+diff --git a/Documentation/devicetree/bindings/interrupt-controller/qcom,mpm.yaml b/Documentation/devicetree/bindings/interrupt-controller/qcom,mpm.yaml
+index 509d20c091af8..6a206111d4e0f 100644
+--- a/Documentation/devicetree/bindings/interrupt-controller/qcom,mpm.yaml
++++ b/Documentation/devicetree/bindings/interrupt-controller/qcom,mpm.yaml
+@@ -62,6 +62,9 @@ properties:
+ - description: MPM pin number
+ - description: GIC SPI number for the MPM pin
+
++ '#power-domain-cells':
++ const: 0
++
+ required:
+ - compatible
+ - reg
+@@ -93,4 +96,5 @@ examples:
+ <86 183>,
+ <90 260>,
+ <91 260>;
++ #power-domain-cells = <0>;
+ };
+diff --git a/Makefile b/Makefile
+index c27600b90cad2..2a8ad0cec2f1c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 67
++SUBLEVEL = 68
+ EXTRAVERSION =
+ NAME = Curry Ramen
+
+diff --git a/arch/arm/boot/dts/imx28-xea.dts b/arch/arm/boot/dts/imx28-xea.dts
+index a400c108f66a2..6c5e6856648af 100644
+--- a/arch/arm/boot/dts/imx28-xea.dts
++++ b/arch/arm/boot/dts/imx28-xea.dts
+@@ -8,6 +8,7 @@
+ #include "imx28-lwe.dtsi"
+
+ / {
++ model = "Liebherr XEA board";
+ compatible = "lwn,imx28-xea", "fsl,imx28";
+ };
+
+diff --git a/arch/arm/boot/dts/imx6ul-pico.dtsi b/arch/arm/boot/dts/imx6ul-pico.dtsi
+index 357ffb2f5ad61..dd6790852b0d6 100644
+--- a/arch/arm/boot/dts/imx6ul-pico.dtsi
++++ b/arch/arm/boot/dts/imx6ul-pico.dtsi
+@@ -121,6 +121,8 @@
+ max-speed = <100>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <6 IRQ_TYPE_LEVEL_LOW>;
++ clocks = <&clks IMX6UL_CLK_ENET_REF>;
++ clock-names = "rmii-ref";
+ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
+index 667568aa4326a..45947707134b8 100644
+--- a/arch/arm/boot/dts/imx7s.dtsi
++++ b/arch/arm/boot/dts/imx7s.dtsi
+@@ -454,7 +454,7 @@
+ };
+
+ gpt1: timer@302d0000 {
+- compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
++ compatible = "fsl,imx7d-gpt", "fsl,imx6dl-gpt";
+ reg = <0x302d0000 0x10000>;
+ interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_GPT1_ROOT_CLK>,
+@@ -463,7 +463,7 @@
+ };
+
+ gpt2: timer@302e0000 {
+- compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
++ compatible = "fsl,imx7d-gpt", "fsl,imx6dl-gpt";
+ reg = <0x302e0000 0x10000>;
+ interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_GPT2_ROOT_CLK>,
+@@ -473,7 +473,7 @@
+ };
+
+ gpt3: timer@302f0000 {
+- compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
++ compatible = "fsl,imx7d-gpt", "fsl,imx6dl-gpt";
+ reg = <0x302f0000 0x10000>;
+ interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_GPT3_ROOT_CLK>,
+@@ -483,7 +483,7 @@
+ };
+
+ gpt4: timer@30300000 {
+- compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
++ compatible = "fsl,imx7d-gpt", "fsl,imx6dl-gpt";
+ reg = <0x30300000 0x10000>;
+ interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_GPT4_ROOT_CLK>,
+diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c
+index b9efe9da06e0b..3d76e8c28c51d 100644
+--- a/arch/arm/mach-imx/mmdc.c
++++ b/arch/arm/mach-imx/mmdc.c
+@@ -502,6 +502,10 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
+
+ name = devm_kasprintf(&pdev->dev,
+ GFP_KERNEL, "mmdc%d", ret);
++ if (!name) {
++ ret = -ENOMEM;
++ goto pmu_release_id;
++ }
+
+ pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk;
+ pmu_mmdc->devtype_data = (struct fsl_mmdc_devtype_data *)of_id->data;
+@@ -524,9 +528,10 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
+
+ pmu_register_err:
+ pr_warn("MMDC Perf PMU failed (%d), disabled\n", ret);
+- ida_simple_remove(&mmdc_ida, pmu_mmdc->id);
+ cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
+ hrtimer_cancel(&pmu_mmdc->hrtimer);
++pmu_release_id:
++ ida_simple_remove(&mmdc_ida, pmu_mmdc->id);
+ pmu_free:
+ kfree(pmu_mmdc);
+ return ret;
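The mmdc hunk above adds the missing NULL check on devm_kasprintf() and a pmu_release_id label, so each failure point unwinds exactly the resources acquired so far, in reverse order. A self-contained userspace sketch of that goto-unwind convention (allocation steps invented for illustration):

    #include <stdio.h>
    #include <stdlib.h>

    static int setup(void)
    {
            char *name, *buf;

            name = malloc(16);              /* step 1 */
            if (!name)
                    return -1;              /* nothing else held yet */

            buf = malloc(64);               /* step 2 */
            if (!buf)
                    goto free_name;         /* undo step 1 only */

            if (snprintf(buf, 64, "mmdc%d", 0) < 0)
                    goto free_buf;          /* undo step 2, then step 1 */

            free(buf);
            free(name);
            return 0;

    free_buf:
            free(buf);
    free_name:
            free(name);
            return -1;
    }

    int main(void)
    {
            return setup() ? 1 : 0;
    }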
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+index 25630a395db56..8c34b3e12a66a 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+@@ -1301,6 +1301,7 @@
+ phys = <&usb3_phy0>, <&usb3_phy0>;
+ phy-names = "usb2-phy", "usb3-phy";
+ snps,gfladj-refclk-lpm-sel-quirk;
++ snps,parkmode-disable-ss-quirk;
+ };
+
+ };
+@@ -1343,6 +1344,7 @@
+ phys = <&usb3_phy1>, <&usb3_phy1>;
+ phy-names = "usb2-phy", "usb3-phy";
+ snps,gfladj-refclk-lpm-sel-quirk;
++ snps,parkmode-disable-ss-quirk;
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+index bf8f02c1535c1..e642cb7d54d77 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+@@ -1431,7 +1431,7 @@
+ phys = <&usb3_phy0>, <&usb3_phy0>;
+ phy-names = "usb2-phy", "usb3-phy";
+ power-domains = <&pgc_otg1>;
+- usb3-resume-missing-cas;
++ snps,parkmode-disable-ss-quirk;
+ status = "disabled";
+ };
+
+@@ -1463,7 +1463,7 @@
+ phys = <&usb3_phy1>, <&usb3_phy1>;
+ phy-names = "usb2-phy", "usb3-phy";
+ power-domains = <&pgc_otg2>;
+- usb3-resume-missing-cas;
++ snps,parkmode-disable-ss-quirk;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
+index d3f9eab2b7844..2c35ed0734a47 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
++++ b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
+@@ -72,7 +72,7 @@
+ };
+ };
+
+- memory {
++ memory@40000000 {
+ reg = <0 0x40000000 0 0x40000000>;
+ };
+
+diff --git a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
+index 36722cabe626e..f9313b697ac12 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
++++ b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
+@@ -54,7 +54,7 @@
+ };
+ };
+
+- memory {
++ memory@40000000 {
+ reg = <0 0x40000000 0 0x20000000>;
+ };
+
+diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+index 0b5f154007be8..49c7185243cc1 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+@@ -43,7 +43,7 @@
+ id-gpio = <&pio 16 GPIO_ACTIVE_HIGH>;
+ };
+
+- usb_p1_vbus: regulator@0 {
++ usb_p1_vbus: regulator-usb-p1 {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_vbus";
+ regulator-min-microvolt = <5000000>;
+@@ -52,7 +52,7 @@
+ enable-active-high;
+ };
+
+- usb_p0_vbus: regulator@1 {
++ usb_p0_vbus: regulator-usb-p0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vbus";
+ regulator-min-microvolt = <5000000>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-evb.dts b/arch/arm64/boot/dts/mediatek/mt8183-evb.dts
+index 52dc4a50e34d3..2ca0da51efaa0 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-evb.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8183-evb.dts
+@@ -30,14 +30,14 @@
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+- scp_mem_reserved: scp_mem_region {
++ scp_mem_reserved: memory@50000000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x50000000 0 0x2900000>;
+ no-map;
+ };
+ };
+
+- ntc@0 {
++ thermal-sensor {
+ compatible = "murata,ncp03wf104";
+ pullup-uv = <1800000>;
+ pullup-ohm = <390000>;
+@@ -139,8 +139,8 @@
+ };
+
+ &pio {
+- i2c_pins_0: i2c0{
+- pins_i2c{
++ i2c_pins_0: i2c0 {
++ pins_i2c {
+ pinmux = <PINMUX_GPIO82__FUNC_SDA0>,
+ <PINMUX_GPIO83__FUNC_SCL0>;
+ mediatek,pull-up-adv = <3>;
+@@ -148,8 +148,8 @@
+ };
+ };
+
+- i2c_pins_1: i2c1{
+- pins_i2c{
++ i2c_pins_1: i2c1 {
++ pins_i2c {
+ pinmux = <PINMUX_GPIO81__FUNC_SDA1>,
+ <PINMUX_GPIO84__FUNC_SCL1>;
+ mediatek,pull-up-adv = <3>;
+@@ -157,8 +157,8 @@
+ };
+ };
+
+- i2c_pins_2: i2c2{
+- pins_i2c{
++ i2c_pins_2: i2c2 {
++ pins_i2c {
+ pinmux = <PINMUX_GPIO103__FUNC_SCL2>,
+ <PINMUX_GPIO104__FUNC_SDA2>;
+ mediatek,pull-up-adv = <3>;
+@@ -166,8 +166,8 @@
+ };
+ };
+
+- i2c_pins_3: i2c3{
+- pins_i2c{
++ i2c_pins_3: i2c3 {
++ pins_i2c {
+ pinmux = <PINMUX_GPIO50__FUNC_SCL3>,
+ <PINMUX_GPIO51__FUNC_SDA3>;
+ mediatek,pull-up-adv = <3>;
+@@ -175,8 +175,8 @@
+ };
+ };
+
+- i2c_pins_4: i2c4{
+- pins_i2c{
++ i2c_pins_4: i2c4 {
++ pins_i2c {
+ pinmux = <PINMUX_GPIO105__FUNC_SCL4>,
+ <PINMUX_GPIO106__FUNC_SDA4>;
+ mediatek,pull-up-adv = <3>;
+@@ -184,8 +184,8 @@
+ };
+ };
+
+- i2c_pins_5: i2c5{
+- pins_i2c{
++ i2c_pins_5: i2c5 {
++ pins_i2c {
+ pinmux = <PINMUX_GPIO48__FUNC_SCL5>,
+ <PINMUX_GPIO49__FUNC_SDA5>;
+ mediatek,pull-up-adv = <3>;
+@@ -193,8 +193,8 @@
+ };
+ };
+
+- spi_pins_0: spi0{
+- pins_spi{
++ spi_pins_0: spi0 {
++ pins_spi {
+ pinmux = <PINMUX_GPIO85__FUNC_SPI0_MI>,
+ <PINMUX_GPIO86__FUNC_SPI0_CSB>,
+ <PINMUX_GPIO87__FUNC_SPI0_MO>,
+@@ -308,8 +308,8 @@
+ };
+ };
+
+- spi_pins_1: spi1{
+- pins_spi{
++ spi_pins_1: spi1 {
++ pins_spi {
+ pinmux = <PINMUX_GPIO161__FUNC_SPI1_A_MI>,
+ <PINMUX_GPIO162__FUNC_SPI1_A_CSB>,
+ <PINMUX_GPIO163__FUNC_SPI1_A_MO>,
+@@ -318,8 +318,8 @@
+ };
+ };
+
+- spi_pins_2: spi2{
+- pins_spi{
++ spi_pins_2: spi2 {
++ pins_spi {
+ pinmux = <PINMUX_GPIO0__FUNC_SPI2_CSB>,
+ <PINMUX_GPIO1__FUNC_SPI2_MO>,
+ <PINMUX_GPIO2__FUNC_SPI2_CLK>,
+@@ -328,8 +328,8 @@
+ };
+ };
+
+- spi_pins_3: spi3{
+- pins_spi{
++ spi_pins_3: spi3 {
++ pins_spi {
+ pinmux = <PINMUX_GPIO21__FUNC_SPI3_MI>,
+ <PINMUX_GPIO22__FUNC_SPI3_CSB>,
+ <PINMUX_GPIO23__FUNC_SPI3_MO>,
+@@ -338,8 +338,8 @@
+ };
+ };
+
+- spi_pins_4: spi4{
+- pins_spi{
++ spi_pins_4: spi4 {
++ pins_spi {
+ pinmux = <PINMUX_GPIO17__FUNC_SPI4_MI>,
+ <PINMUX_GPIO18__FUNC_SPI4_CSB>,
+ <PINMUX_GPIO19__FUNC_SPI4_MO>,
+@@ -348,8 +348,8 @@
+ };
+ };
+
+- spi_pins_5: spi5{
+- pins_spi{
++ spi_pins_5: spi5 {
++ pins_spi {
+ pinmux = <PINMUX_GPIO13__FUNC_SPI5_MI>,
+ <PINMUX_GPIO14__FUNC_SPI5_CSB>,
+ <PINMUX_GPIO15__FUNC_SPI5_MO>,
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
+index 3ac83be536274..dccf367c7ec6c 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
+@@ -101,6 +101,8 @@
+
+ &dsi0 {
+ status = "okay";
++ /delete-property/#size-cells;
++ /delete-property/#address-cells;
+ /delete-node/panel@0;
+ ports {
+ port {
+@@ -437,20 +439,20 @@
+ };
+
+ touchscreen_pins: touchscreen-pins {
+- touch_int_odl {
++ touch-int-odl {
+ pinmux = <PINMUX_GPIO155__FUNC_GPIO155>;
+ input-enable;
+ bias-pull-up;
+ };
+
+- touch_rst_l {
++ touch-rst-l {
+ pinmux = <PINMUX_GPIO156__FUNC_GPIO156>;
+ output-high;
+ };
+ };
+
+ trackpad_pins: trackpad-pins {
+- trackpad_int {
++ trackpad-int {
+ pinmux = <PINMUX_GPIO7__FUNC_GPIO7>;
+ input-enable;
+ bias-disable; /* pulled externally */
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+index 632fd89e75969..a428a581c93a8 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+@@ -108,7 +108,7 @@
+ #size-cells = <2>;
+ ranges;
+
+- scp_mem_reserved: scp_mem_region {
++ scp_mem_reserved: memory@50000000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x50000000 0 0x2900000>;
+ no-map;
+@@ -423,7 +423,7 @@
+
+ &pio {
+ aud_pins_default: audiopins {
+- pins_bus {
++ pins-bus {
+ pinmux = <PINMUX_GPIO97__FUNC_I2S2_MCK>,
+ <PINMUX_GPIO98__FUNC_I2S2_BCK>,
+ <PINMUX_GPIO101__FUNC_I2S2_LRCK>,
+@@ -445,7 +445,7 @@
+ };
+
+ aud_pins_tdm_out_on: audiotdmouton {
+- pins_bus {
++ pins-bus {
+ pinmux = <PINMUX_GPIO169__FUNC_TDM_BCK_2ND>,
+ <PINMUX_GPIO170__FUNC_TDM_LRCK_2ND>,
+ <PINMUX_GPIO171__FUNC_TDM_DATA0_2ND>,
+@@ -457,7 +457,7 @@
+ };
+
+ aud_pins_tdm_out_off: audiotdmoutoff {
+- pins_bus {
++ pins-bus {
+ pinmux = <PINMUX_GPIO169__FUNC_GPIO169>,
+ <PINMUX_GPIO170__FUNC_GPIO170>,
+ <PINMUX_GPIO171__FUNC_GPIO171>,
+@@ -471,13 +471,13 @@
+ };
+
+ bt_pins: bt-pins {
+- pins_bt_en {
++ pins-bt-en {
+ pinmux = <PINMUX_GPIO120__FUNC_GPIO120>;
+ output-low;
+ };
+ };
+
+- ec_ap_int_odl: ec_ap_int_odl {
++ ec_ap_int_odl: ec-ap-int-odl {
+ pins1 {
+ pinmux = <PINMUX_GPIO151__FUNC_GPIO151>;
+ input-enable;
+@@ -485,7 +485,7 @@
+ };
+ };
+
+- h1_int_od_l: h1_int_od_l {
++ h1_int_od_l: h1-int-od-l {
+ pins1 {
+ pinmux = <PINMUX_GPIO153__FUNC_GPIO153>;
+ input-enable;
+@@ -493,7 +493,7 @@
+ };
+
+ i2c0_pins: i2c0 {
+- pins_bus {
++ pins-bus {
+ pinmux = <PINMUX_GPIO82__FUNC_SDA0>,
+ <PINMUX_GPIO83__FUNC_SCL0>;
+ mediatek,pull-up-adv = <3>;
+@@ -502,7 +502,7 @@
+ };
+
+ i2c1_pins: i2c1 {
+- pins_bus {
++ pins-bus {
+ pinmux = <PINMUX_GPIO81__FUNC_SDA1>,
+ <PINMUX_GPIO84__FUNC_SCL1>;
+ mediatek,pull-up-adv = <3>;
+@@ -511,7 +511,7 @@
+ };
+
+ i2c2_pins: i2c2 {
+- pins_bus {
++ pins-bus {
+ pinmux = <PINMUX_GPIO103__FUNC_SCL2>,
+ <PINMUX_GPIO104__FUNC_SDA2>;
+ bias-disable;
+@@ -520,7 +520,7 @@
+ };
+
+ i2c3_pins: i2c3 {
+- pins_bus {
++ pins-bus {
+ pinmux = <PINMUX_GPIO50__FUNC_SCL3>,
+ <PINMUX_GPIO51__FUNC_SDA3>;
+ mediatek,pull-up-adv = <3>;
+@@ -529,7 +529,7 @@
+ };
+
+ i2c4_pins: i2c4 {
+- pins_bus {
++ pins-bus {
+ pinmux = <PINMUX_GPIO105__FUNC_SCL4>,
+ <PINMUX_GPIO106__FUNC_SDA4>;
+ bias-disable;
+@@ -538,7 +538,7 @@
+ };
+
+ i2c5_pins: i2c5 {
+- pins_bus {
++ pins-bus {
+ pinmux = <PINMUX_GPIO48__FUNC_SCL5>,
+ <PINMUX_GPIO49__FUNC_SDA5>;
+ mediatek,pull-up-adv = <3>;
+@@ -547,7 +547,7 @@
+ };
+
+ i2c6_pins: i2c6 {
+- pins_bus {
++ pins-bus {
+ pinmux = <PINMUX_GPIO11__FUNC_SCL6>,
+ <PINMUX_GPIO12__FUNC_SDA6>;
+ bias-disable;
+@@ -555,7 +555,7 @@
+ };
+
+ mmc0_pins_default: mmc0-pins-default {
+- pins_cmd_dat {
++ pins-cmd-dat {
+ pinmux = <PINMUX_GPIO123__FUNC_MSDC0_DAT0>,
+ <PINMUX_GPIO128__FUNC_MSDC0_DAT1>,
+ <PINMUX_GPIO125__FUNC_MSDC0_DAT2>,
+@@ -570,13 +570,13 @@
+ mediatek,pull-up-adv = <01>;
+ };
+
+- pins_clk {
++ pins-clk {
+ pinmux = <PINMUX_GPIO124__FUNC_MSDC0_CLK>;
+ drive-strength = <MTK_DRIVE_14mA>;
+ mediatek,pull-down-adv = <10>;
+ };
+
+- pins_rst {
++ pins-rst {
+ pinmux = <PINMUX_GPIO133__FUNC_MSDC0_RSTB>;
+ drive-strength = <MTK_DRIVE_14mA>;
+ mediatek,pull-down-adv = <01>;
+@@ -584,7 +584,7 @@
+ };
+
+ mmc0_pins_uhs: mmc0-pins-uhs {
+- pins_cmd_dat {
++ pins-cmd-dat {
+ pinmux = <PINMUX_GPIO123__FUNC_MSDC0_DAT0>,
+ <PINMUX_GPIO128__FUNC_MSDC0_DAT1>,
+ <PINMUX_GPIO125__FUNC_MSDC0_DAT2>,
+@@ -599,19 +599,19 @@
+ mediatek,pull-up-adv = <01>;
+ };
+
+- pins_clk {
++ pins-clk {
+ pinmux = <PINMUX_GPIO124__FUNC_MSDC0_CLK>;
+ drive-strength = <MTK_DRIVE_14mA>;
+ mediatek,pull-down-adv = <10>;
+ };
+
+- pins_ds {
++ pins-ds {
+ pinmux = <PINMUX_GPIO131__FUNC_MSDC0_DSL>;
+ drive-strength = <MTK_DRIVE_14mA>;
+ mediatek,pull-down-adv = <10>;
+ };
+
+- pins_rst {
++ pins-rst {
+ pinmux = <PINMUX_GPIO133__FUNC_MSDC0_RSTB>;
+ drive-strength = <MTK_DRIVE_14mA>;
+ mediatek,pull-up-adv = <01>;
+@@ -619,7 +619,7 @@
+ };
+
+ mmc1_pins_default: mmc1-pins-default {
+- pins_cmd_dat {
++ pins-cmd-dat {
+ pinmux = <PINMUX_GPIO31__FUNC_MSDC1_CMD>,
+ <PINMUX_GPIO32__FUNC_MSDC1_DAT0>,
+ <PINMUX_GPIO34__FUNC_MSDC1_DAT1>,
+@@ -629,7 +629,7 @@
+ mediatek,pull-up-adv = <10>;
+ };
+
+- pins_clk {
++ pins-clk {
+ pinmux = <PINMUX_GPIO29__FUNC_MSDC1_CLK>;
+ input-enable;
+ mediatek,pull-down-adv = <10>;
+@@ -637,7 +637,7 @@
+ };
+
+ mmc1_pins_uhs: mmc1-pins-uhs {
+- pins_cmd_dat {
++ pins-cmd-dat {
+ pinmux = <PINMUX_GPIO31__FUNC_MSDC1_CMD>,
+ <PINMUX_GPIO32__FUNC_MSDC1_DAT0>,
+ <PINMUX_GPIO34__FUNC_MSDC1_DAT1>,
+@@ -648,7 +648,7 @@
+ mediatek,pull-up-adv = <10>;
+ };
+
+- pins_clk {
++ pins-clk {
+ pinmux = <PINMUX_GPIO29__FUNC_MSDC1_CLK>;
+ drive-strength = <MTK_DRIVE_8mA>;
+ mediatek,pull-down-adv = <10>;
+@@ -656,15 +656,15 @@
+ };
+ };
+
+- panel_pins_default: panel_pins_default {
+- panel_reset {
++ panel_pins_default: panel-pins-default {
++ panel-reset {
+ pinmux = <PINMUX_GPIO45__FUNC_GPIO45>;
+ output-low;
+ bias-pull-up;
+ };
+ };
+
+- pwm0_pin_default: pwm0_pin_default {
++ pwm0_pin_default: pwm0-pin-default {
+ pins1 {
+ pinmux = <PINMUX_GPIO176__FUNC_GPIO176>;
+ output-high;
+@@ -676,14 +676,14 @@
+ };
+
+ scp_pins: scp {
+- pins_scp_uart {
++ pins-scp-uart {
+ pinmux = <PINMUX_GPIO110__FUNC_TP_URXD1_AO>,
+ <PINMUX_GPIO112__FUNC_TP_UTXD1_AO>;
+ };
+ };
+
+ spi0_pins: spi0 {
+- pins_spi{
++ pins-spi {
+ pinmux = <PINMUX_GPIO85__FUNC_SPI0_MI>,
+ <PINMUX_GPIO86__FUNC_GPIO86>,
+ <PINMUX_GPIO87__FUNC_SPI0_MO>,
+@@ -693,7 +693,7 @@
+ };
+
+ spi1_pins: spi1 {
+- pins_spi{
++ pins-spi {
+ pinmux = <PINMUX_GPIO161__FUNC_SPI1_A_MI>,
+ <PINMUX_GPIO162__FUNC_SPI1_A_CSB>,
+ <PINMUX_GPIO163__FUNC_SPI1_A_MO>,
+@@ -703,20 +703,20 @@
+ };
+
+ spi2_pins: spi2 {
+- pins_spi{
++ pins-spi {
+ pinmux = <PINMUX_GPIO0__FUNC_SPI2_CSB>,
+ <PINMUX_GPIO1__FUNC_SPI2_MO>,
+ <PINMUX_GPIO2__FUNC_SPI2_CLK>;
+ bias-disable;
+ };
+- pins_spi_mi {
++ pins-spi-mi {
+ pinmux = <PINMUX_GPIO94__FUNC_SPI2_MI>;
+ mediatek,pull-down-adv = <00>;
+ };
+ };
+
+ spi3_pins: spi3 {
+- pins_spi{
++ pins-spi {
+ pinmux = <PINMUX_GPIO21__FUNC_SPI3_MI>,
+ <PINMUX_GPIO22__FUNC_SPI3_CSB>,
+ <PINMUX_GPIO23__FUNC_SPI3_MO>,
+@@ -726,7 +726,7 @@
+ };
+
+ spi4_pins: spi4 {
+- pins_spi{
++ pins-spi {
+ pinmux = <PINMUX_GPIO17__FUNC_SPI4_MI>,
+ <PINMUX_GPIO18__FUNC_SPI4_CSB>,
+ <PINMUX_GPIO19__FUNC_SPI4_MO>,
+@@ -736,7 +736,7 @@
+ };
+
+ spi5_pins: spi5 {
+- pins_spi{
++ pins-spi {
+ pinmux = <PINMUX_GPIO13__FUNC_SPI5_MI>,
+ <PINMUX_GPIO14__FUNC_SPI5_CSB>,
+ <PINMUX_GPIO15__FUNC_SPI5_MO>,
+@@ -746,63 +746,63 @@
+ };
+
+ uart0_pins_default: uart0-pins-default {
+- pins_rx {
++ pins-rx {
+ pinmux = <PINMUX_GPIO95__FUNC_URXD0>;
+ input-enable;
+ bias-pull-up;
+ };
+- pins_tx {
++ pins-tx {
+ pinmux = <PINMUX_GPIO96__FUNC_UTXD0>;
+ };
+ };
+
+ uart1_pins_default: uart1-pins-default {
+- pins_rx {
++ pins-rx {
+ pinmux = <PINMUX_GPIO121__FUNC_URXD1>;
+ input-enable;
+ bias-pull-up;
+ };
+- pins_tx {
++ pins-tx {
+ pinmux = <PINMUX_GPIO115__FUNC_UTXD1>;
+ };
+- pins_rts {
++ pins-rts {
+ pinmux = <PINMUX_GPIO47__FUNC_URTS1>;
+ output-enable;
+ };
+- pins_cts {
++ pins-cts {
+ pinmux = <PINMUX_GPIO46__FUNC_UCTS1>;
+ input-enable;
+ };
+ };
+
+ uart1_pins_sleep: uart1-pins-sleep {
+- pins_rx {
++ pins-rx {
+ pinmux = <PINMUX_GPIO121__FUNC_GPIO121>;
+ input-enable;
+ bias-pull-up;
+ };
+- pins_tx {
++ pins-tx {
+ pinmux = <PINMUX_GPIO115__FUNC_UTXD1>;
+ };
+- pins_rts {
++ pins-rts {
+ pinmux = <PINMUX_GPIO47__FUNC_URTS1>;
+ output-enable;
+ };
+- pins_cts {
++ pins-cts {
+ pinmux = <PINMUX_GPIO46__FUNC_UCTS1>;
+ input-enable;
+ };
+ };
+
+ wifi_pins_pwrseq: wifi-pins-pwrseq {
+- pins_wifi_enable {
++ pins-wifi-enable {
+ pinmux = <PINMUX_GPIO119__FUNC_GPIO119>;
+ output-low;
+ };
+ };
+
+ wifi_pins_wakeup: wifi-pins-wakeup {
+- pins_wifi_wakeup {
++ pins-wifi-wakeup {
+ pinmux = <PINMUX_GPIO113__FUNC_GPIO113>;
+ input-enable;
+ };
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts b/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts
+index a1d01639df30a..dd8d39861d9ca 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts
+@@ -178,7 +178,7 @@
+
+ &pio {
+ i2c_pins_0: i2c0 {
+- pins_i2c{
++ pins_i2c {
+ pinmux = <PINMUX_GPIO82__FUNC_SDA0>,
+ <PINMUX_GPIO83__FUNC_SCL0>;
+ mediatek,pull-up-adv = <3>;
+@@ -187,7 +187,7 @@
+ };
+
+ i2c_pins_1: i2c1 {
+- pins_i2c{
++ pins_i2c {
+ pinmux = <PINMUX_GPIO81__FUNC_SDA1>,
+ <PINMUX_GPIO84__FUNC_SCL1>;
+ mediatek,pull-up-adv = <3>;
+@@ -196,7 +196,7 @@
+ };
+
+ i2c_pins_2: i2c2 {
+- pins_i2c{
++ pins_i2c {
+ pinmux = <PINMUX_GPIO103__FUNC_SCL2>,
+ <PINMUX_GPIO104__FUNC_SDA2>;
+ mediatek,pull-up-adv = <3>;
+@@ -205,7 +205,7 @@
+ };
+
+ i2c_pins_3: i2c3 {
+- pins_i2c{
++ pins_i2c {
+ pinmux = <PINMUX_GPIO50__FUNC_SCL3>,
+ <PINMUX_GPIO51__FUNC_SDA3>;
+ mediatek,pull-up-adv = <3>;
+@@ -214,7 +214,7 @@
+ };
+
+ i2c_pins_4: i2c4 {
+- pins_i2c{
++ pins_i2c {
+ pinmux = <PINMUX_GPIO105__FUNC_SCL4>,
+ <PINMUX_GPIO106__FUNC_SDA4>;
+ mediatek,pull-up-adv = <3>;
+@@ -223,7 +223,7 @@
+ };
+
+ i2c_pins_5: i2c5 {
+- pins_i2c{
++ pins_i2c {
+ pinmux = <PINMUX_GPIO48__FUNC_SCL5>,
+ <PINMUX_GPIO49__FUNC_SDA5>;
+ mediatek,pull-up-adv = <3>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+index 268a1f28af8ce..10779a9947fe2 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+@@ -1136,127 +1136,6 @@
+ nvmem-cell-names = "calibration-data";
+ };
+
+- thermal_zones: thermal-zones {
+- cpu_thermal: cpu-thermal {
+- polling-delay-passive = <100>;
+- polling-delay = <500>;
+- thermal-sensors = <&thermal 0>;
+- sustainable-power = <5000>;
+-
+- trips {
+- threshold: trip-point0 {
+- temperature = <68000>;
+- hysteresis = <2000>;
+- type = "passive";
+- };
+-
+- target: trip-point1 {
+- temperature = <80000>;
+- hysteresis = <2000>;
+- type = "passive";
+- };
+-
+- cpu_crit: cpu-crit {
+- temperature = <115000>;
+- hysteresis = <2000>;
+- type = "critical";
+- };
+- };
+-
+- cooling-maps {
+- map0 {
+- trip = <&target>;
+- cooling-device = <&cpu0
+- THERMAL_NO_LIMIT
+- THERMAL_NO_LIMIT>,
+- <&cpu1
+- THERMAL_NO_LIMIT
+- THERMAL_NO_LIMIT>,
+- <&cpu2
+- THERMAL_NO_LIMIT
+- THERMAL_NO_LIMIT>,
+- <&cpu3
+- THERMAL_NO_LIMIT
+- THERMAL_NO_LIMIT>;
+- contribution = <3072>;
+- };
+- map1 {
+- trip = <&target>;
+- cooling-device = <&cpu4
+- THERMAL_NO_LIMIT
+- THERMAL_NO_LIMIT>,
+- <&cpu5
+- THERMAL_NO_LIMIT
+- THERMAL_NO_LIMIT>,
+- <&cpu6
+- THERMAL_NO_LIMIT
+- THERMAL_NO_LIMIT>,
+- <&cpu7
+- THERMAL_NO_LIMIT
+- THERMAL_NO_LIMIT>;
+- contribution = <1024>;
+- };
+- };
+- };
+-
+- /* The tzts1 ~ tzts6 don't need to polling */
+- /* The tzts1 ~ tzts6 don't need to thermal throttle */
+-
+- tzts1: tzts1 {
+- polling-delay-passive = <0>;
+- polling-delay = <0>;
+- thermal-sensors = <&thermal 1>;
+- sustainable-power = <5000>;
+- trips {};
+- cooling-maps {};
+- };
+-
+- tzts2: tzts2 {
+- polling-delay-passive = <0>;
+- polling-delay = <0>;
+- thermal-sensors = <&thermal 2>;
+- sustainable-power = <5000>;
+- trips {};
+- cooling-maps {};
+- };
+-
+- tzts3: tzts3 {
+- polling-delay-passive = <0>;
+- polling-delay = <0>;
+- thermal-sensors = <&thermal 3>;
+- sustainable-power = <5000>;
+- trips {};
+- cooling-maps {};
+- };
+-
+- tzts4: tzts4 {
+- polling-delay-passive = <0>;
+- polling-delay = <0>;
+- thermal-sensors = <&thermal 4>;
+- sustainable-power = <5000>;
+- trips {};
+- cooling-maps {};
+- };
+-
+- tzts5: tzts5 {
+- polling-delay-passive = <0>;
+- polling-delay = <0>;
+- thermal-sensors = <&thermal 5>;
+- sustainable-power = <5000>;
+- trips {};
+- cooling-maps {};
+- };
+-
+- tztsABB: tztsABB {
+- polling-delay-passive = <0>;
+- polling-delay = <0>;
+- thermal-sensors = <&thermal 6>;
+- sustainable-power = <5000>;
+- trips {};
+- cooling-maps {};
+- };
+- };
+-
+ pwm0: pwm@1100e000 {
+ compatible = "mediatek,mt8183-disp-pwm";
+ reg = <0 0x1100e000 0 0x1000>;
+@@ -2031,4 +1910,125 @@
+ power-domains = <&spm MT8183_POWER_DOMAIN_CAM>;
+ };
+ };
++
++ thermal_zones: thermal-zones {
++ cpu_thermal: cpu-thermal {
++ polling-delay-passive = <100>;
++ polling-delay = <500>;
++ thermal-sensors = <&thermal 0>;
++ sustainable-power = <5000>;
++
++ trips {
++ threshold: trip-point0 {
++ temperature = <68000>;
++ hysteresis = <2000>;
++ type = "passive";
++ };
++
++ target: trip-point1 {
++ temperature = <80000>;
++ hysteresis = <2000>;
++ type = "passive";
++ };
++
++ cpu_crit: cpu-crit {
++ temperature = <115000>;
++ hysteresis = <2000>;
++ type = "critical";
++ };
++ };
++
++ cooling-maps {
++ map0 {
++ trip = <&target>;
++ cooling-device = <&cpu0
++ THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>,
++ <&cpu1
++ THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>,
++ <&cpu2
++ THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>,
++ <&cpu3
++ THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ contribution = <3072>;
++ };
++ map1 {
++ trip = <&target>;
++ cooling-device = <&cpu4
++ THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>,
++ <&cpu5
++ THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>,
++ <&cpu6
++ THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>,
++ <&cpu7
++ THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ contribution = <1024>;
++ };
++ };
++ };
++
++ /* The tzts1 ~ tzts6 don't need to polling */
++ /* The tzts1 ~ tzts6 don't need to thermal throttle */
++
++ tzts1: tzts1 {
++ polling-delay-passive = <0>;
++ polling-delay = <0>;
++ thermal-sensors = <&thermal 1>;
++ sustainable-power = <5000>;
++ trips {};
++ cooling-maps {};
++ };
++
++ tzts2: tzts2 {
++ polling-delay-passive = <0>;
++ polling-delay = <0>;
++ thermal-sensors = <&thermal 2>;
++ sustainable-power = <5000>;
++ trips {};
++ cooling-maps {};
++ };
++
++ tzts3: tzts3 {
++ polling-delay-passive = <0>;
++ polling-delay = <0>;
++ thermal-sensors = <&thermal 3>;
++ sustainable-power = <5000>;
++ trips {};
++ cooling-maps {};
++ };
++
++ tzts4: tzts4 {
++ polling-delay-passive = <0>;
++ polling-delay = <0>;
++ thermal-sensors = <&thermal 4>;
++ sustainable-power = <5000>;
++ trips {};
++ cooling-maps {};
++ };
++
++ tzts5: tzts5 {
++ polling-delay-passive = <0>;
++ polling-delay = <0>;
++ thermal-sensors = <&thermal 5>;
++ sustainable-power = <5000>;
++ trips {};
++ cooling-maps {};
++ };
++
++ tztsABB: tztsABB {
++ polling-delay-passive = <0>;
++ polling-delay = <0>;
++ thermal-sensors = <&thermal 6>;
++ sustainable-power = <5000>;
++ trips {};
++ cooling-maps {};
++ };
++ };
+ };
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
+index 9b62e161db261..4b8a1c462906e 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
+@@ -207,7 +207,7 @@
+ pinctrl-0 = <&i2c7_pins>;
+
+ pmic@34 {
+- #interrupt-cells = <1>;
++ #interrupt-cells = <2>;
+ compatible = "mediatek,mt6360";
+ reg = <0x34>;
+ interrupt-controller;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+index ef2764a595eda..414cbe3451270 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+@@ -471,6 +471,8 @@
+
+ power-domain@MT8195_POWER_DOMAIN_VENC_CORE1 {
+ reg = <MT8195_POWER_DOMAIN_VENC_CORE1>;
++ clocks = <&vencsys_core1 CLK_VENC_CORE1_LARB>;
++ clock-names = "venc1-larb";
+ mediatek,infracfg = <&infracfg_ao>;
+ #power-domain-cells = <0>;
+ };
+@@ -533,6 +535,8 @@
+
+ power-domain@MT8195_POWER_DOMAIN_VENC {
+ reg = <MT8195_POWER_DOMAIN_VENC>;
++ clocks = <&vencsys CLK_VENC_LARB>;
++ clock-names = "venc0-larb";
+ mediatek,infracfg = <&infracfg_ao>;
+ #power-domain-cells = <0>;
+ };
+@@ -1985,7 +1989,7 @@
+ reg = <0 0x1b010000 0 0x1000>;
+ mediatek,larb-id = <20>;
+ mediatek,smi = <&smi_common_vpp>;
+- clocks = <&vencsys_core1 CLK_VENC_CORE1_LARB>,
++ clocks = <&vencsys_core1 CLK_VENC_CORE1_VENC>,
+ <&vencsys_core1 CLK_VENC_CORE1_GALS>,
+ <&vppsys0 CLK_VPP0_GALS_VDO0_VDO1_VENCSYS_CORE1>;
+ clock-names = "apb", "smi", "gals";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+index 49ae15708a0b6..905a50aa5dc38 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+@@ -666,7 +666,7 @@
+
+ vdec: video-codec@ff360000 {
+ compatible = "rockchip,rk3328-vdec", "rockchip,rk3399-vdec";
+- reg = <0x0 0xff360000 0x0 0x400>;
++ reg = <0x0 0xff360000 0x0 0x480>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru ACLK_RKVDEC>, <&cru HCLK_RKVDEC>,
+ <&cru SCLK_VDEC_CABAC>, <&cru SCLK_VDEC_CORE>;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+index 5f3caf01badeb..a7e6eccb14cc6 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+@@ -1062,7 +1062,9 @@
+ power-domain@RK3399_PD_VDU {
+ reg = <RK3399_PD_VDU>;
+ clocks = <&cru ACLK_VDU>,
+- <&cru HCLK_VDU>;
++ <&cru HCLK_VDU>,
++ <&cru SCLK_VDU_CA>,
++ <&cru SCLK_VDU_CORE>;
+ pm_qos = <&qos_video_m1_r>,
+ <&qos_video_m1_w>;
+ #power-domain-cells = <0>;
+@@ -1338,7 +1340,7 @@
+
+ vdec: video-codec@ff660000 {
+ compatible = "rockchip,rk3399-vdec";
+- reg = <0x0 0xff660000 0x0 0x400>;
++ reg = <0x0 0xff660000 0x0 0x480>;
+ interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru ACLK_VDU>, <&cru HCLK_VDU>,
+ <&cru SCLK_VDU_CA>, <&cru SCLK_VDU_CORE>;
+diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
+index 82b4402810da0..40ed49d9adff5 100644
+--- a/arch/loongarch/net/bpf_jit.c
++++ b/arch/loongarch/net/bpf_jit.c
+@@ -796,8 +796,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
+
+ /* function return */
+ case BPF_JMP | BPF_EXIT:
+- emit_sext_32(ctx, regmap[BPF_REG_0], true);
+-
+ if (i == ctx->prog->len - 1)
+ break;
+
+@@ -844,14 +842,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
+ }
+ break;
+ case BPF_DW:
+- if (is_signed_imm12(off)) {
+- emit_insn(ctx, ldd, dst, src, off);
+- } else if (is_signed_imm14(off)) {
+- emit_insn(ctx, ldptrd, dst, src, off);
+- } else {
+- move_imm(ctx, t1, off, is32);
+- emit_insn(ctx, ldxd, dst, src, t1);
+- }
++ move_imm(ctx, t1, off, is32);
++ emit_insn(ctx, ldxd, dst, src, t1);
+ break;
+ }
+ break;
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index 0e62f5edaee2e..585783c9907ef 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -483,6 +483,7 @@ config MACH_LOONGSON2EF
+
+ config MACH_LOONGSON64
+ bool "Loongson 64-bit family of machines"
++ select ARCH_DMA_DEFAULT_COHERENT
+ select ARCH_SPARSEMEM_ENABLE
+ select ARCH_MIGHT_HAVE_PC_PARPORT
+ select ARCH_MIGHT_HAVE_PC_SERIO
+@@ -1304,6 +1305,7 @@ config CPU_LOONGSON64
+ select CPU_SUPPORTS_MSA
+ select CPU_DIEI_BROKEN if !LOONGSON3_ENHANCEMENT
+ select CPU_MIPSR2_IRQ_VI
++ select DMA_NONCOHERENT
+ select WEAK_ORDERING
+ select WEAK_REORDERING_BEYOND_LLSC
+ select MIPS_ASID_BITS_VARIABLE
+diff --git a/arch/mips/include/asm/mach-loongson64/boot_param.h b/arch/mips/include/asm/mach-loongson64/boot_param.h
+index 035b1a69e2d00..e007edd6b60a7 100644
+--- a/arch/mips/include/asm/mach-loongson64/boot_param.h
++++ b/arch/mips/include/asm/mach-loongson64/boot_param.h
+@@ -14,7 +14,11 @@
+ #define ADAPTER_ROM 8
+ #define ACPI_TABLE 9
+ #define SMBIOS_TABLE 10
+-#define MAX_MEMORY_TYPE 11
++#define UMA_VIDEO_RAM 11
++#define VUMA_VIDEO_RAM 12
++#define MAX_MEMORY_TYPE 13
++
++#define MEM_SIZE_IS_IN_BYTES (1 << 31)
+
+ #define LOONGSON3_BOOT_MEM_MAP_MAX 128
+ struct efi_memory_map_loongson {
+@@ -117,7 +121,8 @@ struct irq_source_routing_table {
+ u64 pci_io_start_addr;
+ u64 pci_io_end_addr;
+ u64 pci_config_addr;
+- u32 dma_mask_bits;
++ u16 dma_mask_bits;
++ u16 dma_noncoherent;
+ } __packed;
+
+ struct interface_info {
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index bbe9ce471791e..17d80e2f2e4cb 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -121,6 +121,19 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
+ /* Put the stack after the struct pt_regs. */
+ childksp = (unsigned long) childregs;
+ p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK;
++
++ /*
++ * New tasks lose permission to use the fpu. This accelerates context
++ * switching for most programs since they don't use the fpu.
++ */
++ clear_tsk_thread_flag(p, TIF_USEDFPU);
++ clear_tsk_thread_flag(p, TIF_USEDMSA);
++ clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);
++
++#ifdef CONFIG_MIPS_MT_FPAFF
++ clear_tsk_thread_flag(p, TIF_FPUBOUND);
++#endif /* CONFIG_MIPS_MT_FPAFF */
++
+ if (unlikely(args->fn)) {
+ /* kernel thread */
+ unsigned long status = p->thread.cp0_status;
+@@ -149,20 +162,8 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
+ p->thread.reg29 = (unsigned long) childregs;
+ p->thread.reg31 = (unsigned long) ret_from_fork;
+
+- /*
+- * New tasks lose permission to use the fpu. This accelerates context
+- * switching for most programs since they don't use the fpu.
+- */
+ childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
+
+- clear_tsk_thread_flag(p, TIF_USEDFPU);
+- clear_tsk_thread_flag(p, TIF_USEDMSA);
+- clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);
+-
+-#ifdef CONFIG_MIPS_MT_FPAFF
+- clear_tsk_thread_flag(p, TIF_FPUBOUND);
+-#endif /* CONFIG_MIPS_MT_FPAFF */
+-
+ #ifdef CONFIG_MIPS_FP_SUPPORT
+ atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
+ #endif
+diff --git a/arch/mips/loongson64/env.c b/arch/mips/loongson64/env.c
+index c961e2999f15a..ef3750a6ffacf 100644
+--- a/arch/mips/loongson64/env.c
++++ b/arch/mips/loongson64/env.c
+@@ -13,6 +13,8 @@
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu Zhangjin, wuzhangjin@gmail.com
+ */
++
++#include <linux/dma-map-ops.h>
+ #include <linux/export.h>
+ #include <linux/pci_ids.h>
+ #include <asm/bootinfo.h>
+@@ -147,8 +149,14 @@ void __init prom_lefi_init_env(void)
+
+ loongson_sysconf.dma_mask_bits = eirq_source->dma_mask_bits;
+ if (loongson_sysconf.dma_mask_bits < 32 ||
+- loongson_sysconf.dma_mask_bits > 64)
++ loongson_sysconf.dma_mask_bits > 64) {
+ loongson_sysconf.dma_mask_bits = 32;
++ dma_default_coherent = true;
++ } else {
++ dma_default_coherent = !eirq_source->dma_noncoherent;
++ }
++
++ pr_info("Firmware: Coherent DMA: %s\n", dma_default_coherent ? "on" : "off");
+
+ loongson_sysconf.restart_addr = boot_p->reset_system.ResetWarm;
+ loongson_sysconf.poweroff_addr = boot_p->reset_system.Shutdown;
+diff --git a/arch/mips/loongson64/init.c b/arch/mips/loongson64/init.c
+index ee8de1735b7c0..f25caa6aa9d30 100644
+--- a/arch/mips/loongson64/init.c
++++ b/arch/mips/loongson64/init.c
+@@ -49,8 +49,7 @@ void virtual_early_config(void)
+ void __init szmem(unsigned int node)
+ {
+ u32 i, mem_type;
+- static unsigned long num_physpages;
+- u64 node_id, node_psize, start_pfn, end_pfn, mem_start, mem_size;
++ phys_addr_t node_id, mem_start, mem_size;
+
+ /* Otherwise come from DTB */
+ if (loongson_sysconf.fw_interface != LOONGSON_LEFI)
+@@ -64,30 +63,46 @@ void __init szmem(unsigned int node)
+
+ mem_type = loongson_memmap->map[i].mem_type;
+ mem_size = loongson_memmap->map[i].mem_size;
+- mem_start = loongson_memmap->map[i].mem_start;
++
++ /* Memory size comes in MB if MEM_SIZE_IS_IN_BYTES not set */
++ if (mem_size & MEM_SIZE_IS_IN_BYTES)
++ mem_size &= ~MEM_SIZE_IS_IN_BYTES;
++ else
++ mem_size = mem_size << 20;
++
++ mem_start = (node_id << 44) | loongson_memmap->map[i].mem_start;
+
+ switch (mem_type) {
+ case SYSTEM_RAM_LOW:
+ case SYSTEM_RAM_HIGH:
+- start_pfn = ((node_id << 44) + mem_start) >> PAGE_SHIFT;
+- node_psize = (mem_size << 20) >> PAGE_SHIFT;
+- end_pfn = start_pfn + node_psize;
+- num_physpages += node_psize;
+- pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n",
+- (u32)node_id, mem_type, mem_start, mem_size);
+- pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
+- start_pfn, end_pfn, num_physpages);
+- memblock_add_node(PFN_PHYS(start_pfn),
+- PFN_PHYS(node_psize), node,
++ case UMA_VIDEO_RAM:
++ pr_info("Node %d, mem_type:%d\t[%pa], %pa bytes usable\n",
++ (u32)node_id, mem_type, &mem_start, &mem_size);
++ memblock_add_node(mem_start, mem_size, node,
+ MEMBLOCK_NONE);
+ break;
+ case SYSTEM_RAM_RESERVED:
+- pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n",
+- (u32)node_id, mem_type, mem_start, mem_size);
+- memblock_reserve(((node_id << 44) + mem_start), mem_size << 20);
++ case VIDEO_ROM:
++ case ADAPTER_ROM:
++ case ACPI_TABLE:
++ case SMBIOS_TABLE:
++ pr_info("Node %d, mem_type:%d\t[%pa], %pa bytes reserved\n",
++ (u32)node_id, mem_type, &mem_start, &mem_size);
++ memblock_reserve(mem_start, mem_size);
++ break;
++ /* We should not reserve VUMA_VIDEO_RAM as it overlaps with MMIO */
++ case VUMA_VIDEO_RAM:
++ default:
++ pr_info("Node %d, mem_type:%d\t[%pa], %pa bytes unhandled\n",
++ (u32)node_id, mem_type, &mem_start, &mem_size);
+ break;
+ }
+ }
++
++ /* Reserve vgabios if it comes from firmware */
++ if (loongson_sysconf.vgabios_addr)
++ memblock_reserve(virt_to_phys((void *)loongson_sysconf.vgabios_addr),
++ SZ_256K);
+ }
+
+ #ifndef CONFIG_NUMA
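szmem() now decodes the firmware size field through a flag bit: with MEM_SIZE_IS_IN_BYTES set the value is already in bytes, otherwise it is in MiB and must be shifted up by 20. A runnable model of just that decoding (sample values invented):

    #include <stdint.h>
    #include <stdio.h>

    #define MEM_SIZE_IS_IN_BYTES (1ULL << 31)

    static uint64_t decode_mem_size(uint64_t raw)
    {
            if (raw & MEM_SIZE_IS_IN_BYTES)
                    return raw & ~MEM_SIZE_IS_IN_BYTES;     /* already bytes */
            return raw << 20;                               /* MiB -> bytes */
    }

    int main(void)
    {
            /* 256 -> 256 MiB in bytes; flag | 4096 -> exactly 4096 bytes */
            printf("%llu\n", (unsigned long long)decode_mem_size(256));
            printf("%llu\n", (unsigned long long)decode_mem_size(MEM_SIZE_IS_IN_BYTES | 4096));
            return 0;
    }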
+diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
+index 67c26e81e2150..345d5e021484c 100644
+--- a/arch/parisc/Kconfig
++++ b/arch/parisc/Kconfig
+@@ -105,9 +105,12 @@ config ARCH_HAS_ILOG2_U64
+ default n
+
+ config GENERIC_BUG
+- bool
+- default y
++ def_bool y
+ depends on BUG
++ select GENERIC_BUG_RELATIVE_POINTERS if 64BIT
++
++config GENERIC_BUG_RELATIVE_POINTERS
++ bool
+
+ config GENERIC_HWEIGHT
+ bool
+diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h
+index b9cad0bb4461b..833555f74ffa7 100644
+--- a/arch/parisc/include/asm/bug.h
++++ b/arch/parisc/include/asm/bug.h
+@@ -17,26 +17,27 @@
+ #define PARISC_BUG_BREAK_ASM "break 0x1f, 0x1fff"
+ #define PARISC_BUG_BREAK_INSN 0x03ffe01f /* PARISC_BUG_BREAK_ASM */
+
+-#if defined(CONFIG_64BIT)
+-#define ASM_WORD_INSN ".dword\t"
++#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
++# define __BUG_REL(val) ".word " __stringify(val) " - ."
+ #else
+-#define ASM_WORD_INSN ".word\t"
++# define __BUG_REL(val) ".word " __stringify(val)
+ #endif
+
++
+ #ifdef CONFIG_DEBUG_BUGVERBOSE
+ #define BUG() \
+ do { \
+ asm volatile("\n" \
+ "1:\t" PARISC_BUG_BREAK_ASM "\n" \
+ "\t.pushsection __bug_table,\"a\"\n" \
+- "\t.align %4\n" \
+- "2:\t" ASM_WORD_INSN "1b, %c0\n" \
++ "\t.align 4\n" \
++ "2:\t" __BUG_REL(1b) "\n" \
++ "\t" __BUG_REL(%c0) "\n" \
+ "\t.short %1, %2\n" \
+- "\t.blockz %3-2*%4-2*2\n" \
++ "\t.blockz %3-2*4-2*2\n" \
+ "\t.popsection" \
+ : : "i" (__FILE__), "i" (__LINE__), \
+- "i" (0), "i" (sizeof(struct bug_entry)), \
+- "i" (sizeof(long)) ); \
++ "i" (0), "i" (sizeof(struct bug_entry)) ); \
+ unreachable(); \
+ } while(0)
+
+@@ -54,15 +55,15 @@
+ asm volatile("\n" \
+ "1:\t" PARISC_BUG_BREAK_ASM "\n" \
+ "\t.pushsection __bug_table,\"a\"\n" \
+- "\t.align %4\n" \
+- "2:\t" ASM_WORD_INSN "1b, %c0\n" \
++ "\t.align 4\n" \
++ "2:\t" __BUG_REL(1b) "\n" \
++ "\t" __BUG_REL(%c0) "\n" \
+ "\t.short %1, %2\n" \
+- "\t.blockz %3-2*%4-2*2\n" \
++ "\t.blockz %3-2*4-2*2\n" \
+ "\t.popsection" \
+ : : "i" (__FILE__), "i" (__LINE__), \
+ "i" (BUGFLAG_WARNING|(flags)), \
+- "i" (sizeof(struct bug_entry)), \
+- "i" (sizeof(long)) ); \
++ "i" (sizeof(struct bug_entry)) ); \
+ } while(0)
+ #else
+ #define __WARN_FLAGS(flags) \
+@@ -70,14 +71,13 @@
+ asm volatile("\n" \
+ "1:\t" PARISC_BUG_BREAK_ASM "\n" \
+ "\t.pushsection __bug_table,\"a\"\n" \
+- "\t.align %2\n" \
+- "2:\t" ASM_WORD_INSN "1b\n" \
++ "\t.align 4\n" \
++ "2:\t" __BUG_REL(1b) "\n" \
+ "\t.short %0\n" \
+- "\t.blockz %1-%2-2\n" \
++ "\t.blockz %1-4-2\n" \
+ "\t.popsection" \
+ : : "i" (BUGFLAG_WARNING|(flags)), \
+- "i" (sizeof(struct bug_entry)), \
+- "i" (sizeof(long)) ); \
++ "i" (sizeof(struct bug_entry)) ); \
+ } while(0)
+ #endif
+
+diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
+index 69774bb362d6a..29d78eefc8894 100644
+--- a/arch/riscv/Kconfig.socs
++++ b/arch/riscv/Kconfig.socs
+@@ -23,6 +23,7 @@ config SOC_STARFIVE
+ select PINCTRL
+ select RESET_CONTROLLER
+ select SIFIVE_PLIC
++ select ARM_AMBA
+ help
+ This enables support for StarFive SoC platform hardware.
+
+diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
+index 378f5b1514435..5348d842c7453 100644
+--- a/arch/riscv/kernel/traps_misaligned.c
++++ b/arch/riscv/kernel/traps_misaligned.c
+@@ -342,16 +342,14 @@ int handle_misaligned_store(struct pt_regs *regs)
+ } else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
+ len = 8;
+ val.data_ulong = GET_RS2S(insn, regs);
+- } else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP &&
+- ((insn >> SH_RD) & 0x1f)) {
++ } else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP) {
+ len = 8;
+ val.data_ulong = GET_RS2C(insn, regs);
+ #endif
+ } else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
+ len = 4;
+ val.data_ulong = GET_RS2S(insn, regs);
+- } else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP &&
+- ((insn >> SH_RD) & 0x1f)) {
++ } else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP) {
+ len = 4;
+ val.data_ulong = GET_RS2C(insn, regs);
+ } else {
+diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
+index 4909dcd762e8c..9977d637f836d 100644
+--- a/arch/s390/mm/pgtable.c
++++ b/arch/s390/mm/pgtable.c
+@@ -731,7 +731,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
+ pte_clear(mm, addr, ptep);
+ }
+ if (reset)
+- pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
++ pgste_val(pgste) &= ~(_PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT);
+ pgste_set_unlock(ptep, pgste);
+ preempt_enable();
+ }
+diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
+index 8a1d48b8c2a3e..d0565a9e7d8c9 100644
+--- a/arch/x86/coco/tdx/tdx.c
++++ b/arch/x86/coco/tdx/tdx.c
+@@ -8,6 +8,7 @@
+ #include <asm/coco.h>
+ #include <asm/tdx.h>
+ #include <asm/vmx.h>
++#include <asm/ia32.h>
+ #include <asm/insn.h>
+ #include <asm/insn-eval.h>
+ #include <asm/pgtable.h>
+diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
+index 93c60c0c9d4a7..9c0b26ae51069 100644
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -25,6 +25,7 @@
+ #include <xen/events.h>
+ #endif
+
++#include <asm/apic.h>
+ #include <asm/desc.h>
+ #include <asm/traps.h>
+ #include <asm/vdso.h>
+@@ -96,6 +97,10 @@ static __always_inline int syscall_32_enter(struct pt_regs *regs)
+ return (int)regs->orig_ax;
+ }
+
++#ifdef CONFIG_IA32_EMULATION
++bool __ia32_enabled __ro_after_init = true;
++#endif
++
+ /*
+ * Invoke a 32-bit syscall. Called with IRQs on in CONTEXT_KERNEL.
+ */
+@@ -115,7 +120,96 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs, int nr)
+ }
+ }
+
+-/* Handles int $0x80 */
++#ifdef CONFIG_IA32_EMULATION
++static __always_inline bool int80_is_external(void)
++{
++ const unsigned int offs = (0x80 / 32) * 0x10;
++ const u32 bit = BIT(0x80 % 32);
++
++ /* The local APIC on XENPV guests is fake */
++ if (cpu_feature_enabled(X86_FEATURE_XENPV))
++ return false;
++
++ /*
++ * If vector 0x80 is set in the APIC ISR then this is an external
++ * interrupt. Either from broken hardware or injected by a VMM.
++ *
++ * Note: In guest mode this is only valid for secure guests where
++ * the secure module fully controls the vAPIC exposed to the guest.
++ */
++ return apic_read(APIC_ISR + offs) & bit;
++}
++
++/**
++ * int80_emulation - 32-bit legacy syscall entry
++ *
++ * This entry point can be used by 32-bit and 64-bit programs to perform
++ * 32-bit system calls. Instances of INT $0x80 can be found inline in
++ * various programs and libraries. It is also used by the vDSO's
++ * __kernel_vsyscall fallback for hardware that doesn't support a faster
++ * entry method. Restarted 32-bit system calls also fall back to INT
++ * $0x80 regardless of what instruction was originally used to do the
++ * system call.
++ *
++ * This is considered a slow path. It is not used by most libc
++ * implementations on modern hardware except during process startup.
++ *
++ * The arguments for the INT $0x80 based syscall are on stack in the
++ * pt_regs structure:
++ * eax: system call number
++ * ebx, ecx, edx, esi, edi, ebp: arg1 - arg 6
++ */
++DEFINE_IDTENTRY_RAW(int80_emulation)
++{
++ int nr;
++
++ /* Kernel does not use INT $0x80! */
++ if (unlikely(!user_mode(regs))) {
++ irqentry_enter(regs);
++ instrumentation_begin();
++ panic("Unexpected external interrupt 0x80\n");
++ }
++
++ /*
++ * Establish kernel context for instrumentation, including for
++ * int80_is_external() below which calls into the APIC driver.
++ * Identical for soft and external interrupts.
++ */
++ enter_from_user_mode(regs);
++
++ instrumentation_begin();
++ add_random_kstack_offset();
++
++ /* Validate that this is a soft interrupt to the extent possible */
++ if (unlikely(int80_is_external()))
++ panic("Unexpected external interrupt 0x80\n");
++
++ /*
++ * The low level idtentry code pushed -1 into regs::orig_ax
++ * and regs::ax contains the syscall number.
++ *
++ * User tracing code (ptrace or signal handlers) might assume
++ * that the regs::orig_ax contains a 32-bit number on invoking
++ * a 32-bit syscall.
++ *
++ * Establish the syscall convention by saving the 32bit truncated
++ * syscall number in regs::orig_ax and by invalidating regs::ax.
++ */
++ regs->orig_ax = regs->ax & GENMASK(31, 0);
++ regs->ax = -ENOSYS;
++
++ nr = syscall_32_enter(regs);
++
++ local_irq_enable();
++ nr = syscall_enter_from_user_mode_work(regs, nr);
++ do_syscall_32_irqs_on(regs, nr);
++
++ instrumentation_end();
++ syscall_exit_to_user_mode(regs);
++}
++#else /* CONFIG_IA32_EMULATION */
++
++/* Handles int $0x80 on a 32bit kernel */
+ __visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
+ {
+ int nr = syscall_32_enter(regs);
+@@ -134,6 +228,7 @@ __visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
+ instrumentation_end();
+ syscall_exit_to_user_mode(regs);
+ }
++#endif /* !CONFIG_IA32_EMULATION */
+
+ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
+ {
+diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
+index 4dd19819053a5..d6c08d8986b17 100644
+--- a/arch/x86/entry/entry_64_compat.S
++++ b/arch/x86/entry/entry_64_compat.S
+@@ -277,80 +277,3 @@ SYM_INNER_LABEL(entry_SYSRETL_compat_end, SYM_L_GLOBAL)
+ ANNOTATE_NOENDBR
+ int3
+ SYM_CODE_END(entry_SYSCALL_compat)
+-
+-/*
+- * 32-bit legacy system call entry.
+- *
+- * 32-bit x86 Linux system calls traditionally used the INT $0x80
+- * instruction. INT $0x80 lands here.
+- *
+- * This entry point can be used by 32-bit and 64-bit programs to perform
+- * 32-bit system calls. Instances of INT $0x80 can be found inline in
+- * various programs and libraries. It is also used by the vDSO's
+- * __kernel_vsyscall fallback for hardware that doesn't support a faster
+- * entry method. Restarted 32-bit system calls also fall back to INT
+- * $0x80 regardless of what instruction was originally used to do the
+- * system call.
+- *
+- * This is considered a slow path. It is not used by most libc
+- * implementations on modern hardware except during process startup.
+- *
+- * Arguments:
+- * eax system call number
+- * ebx arg1
+- * ecx arg2
+- * edx arg3
+- * esi arg4
+- * edi arg5
+- * ebp arg6
+- */
+-SYM_CODE_START(entry_INT80_compat)
+- UNWIND_HINT_ENTRY
+- ENDBR
+- /*
+- * Interrupts are off on entry.
+- */
+- ASM_CLAC /* Do this early to minimize exposure */
+- ALTERNATIVE "swapgs", "", X86_FEATURE_XENPV
+-
+- /*
+- * User tracing code (ptrace or signal handlers) might assume that
+- * the saved RAX contains a 32-bit number when we're invoking a 32-bit
+- * syscall. Just in case the high bits are nonzero, zero-extend
+- * the syscall number. (This could almost certainly be deleted
+- * with no ill effects.)
+- */
+- movl %eax, %eax
+-
+- /* switch to thread stack expects orig_ax and rdi to be pushed */
+- pushq %rax /* pt_regs->orig_ax */
+-
+- /* Need to switch before accessing the thread stack. */
+- SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
+-
+- /* In the Xen PV case we already run on the thread stack. */
+- ALTERNATIVE "", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
+-
+- movq %rsp, %rax
+- movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+-
+- pushq 5*8(%rax) /* regs->ss */
+- pushq 4*8(%rax) /* regs->rsp */
+- pushq 3*8(%rax) /* regs->eflags */
+- pushq 2*8(%rax) /* regs->cs */
+- pushq 1*8(%rax) /* regs->ip */
+- pushq 0*8(%rax) /* regs->orig_ax */
+-.Lint80_keep_stack:
+-
+- PUSH_AND_CLEAR_REGS rax=$-ENOSYS
+- UNWIND_HINT_REGS
+-
+- cld
+-
+- IBRS_ENTER
+- UNTRAIN_RET
+-
+- movq %rsp, %rdi
+- call do_int80_syscall_32
+- jmp swapgs_restore_regs_and_return_to_usermode
+-SYM_CODE_END(entry_INT80_compat)
+diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h
+index fada857f0a1ed..9805629479d96 100644
+--- a/arch/x86/include/asm/ia32.h
++++ b/arch/x86/include/asm/ia32.h
+@@ -68,6 +68,27 @@ extern void ia32_pick_mmap_layout(struct mm_struct *mm);
+
+ #endif
+
+-#endif /* CONFIG_IA32_EMULATION */
++extern bool __ia32_enabled;
++
++static inline bool ia32_enabled(void)
++{
++ return __ia32_enabled;
++}
++
++static inline void ia32_disable(void)
++{
++ __ia32_enabled = false;
++}
++
++#else /* !CONFIG_IA32_EMULATION */
++
++static inline bool ia32_enabled(void)
++{
++ return IS_ENABLED(CONFIG_X86_32);
++}
++
++static inline void ia32_disable(void) {}
++
++#endif
+
+ #endif /* _ASM_X86_IA32_H */
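The header now pairs a runtime switch with compile-time stubs: with IA32 emulation built in, ia32_enabled() reads a writable flag that ia32_disable() can clear (the SEV hunk later in this patch does exactly that); without it, both calls fold to constants. A compilable userspace model of the pattern (the config macro is simulated here):

    #include <stdbool.h>
    #include <stdio.h>

    #define CONFIG_IA32_EMULATION 1         /* simulated Kconfig option */

    #ifdef CONFIG_IA32_EMULATION
    static bool __ia32_enabled = true;
    static bool ia32_enabled(void) { return __ia32_enabled; }
    static void ia32_disable(void) { __ia32_enabled = false; }
    #else
    static bool ia32_enabled(void) { return false; }
    static void ia32_disable(void) { }
    #endif

    int main(void)
    {
            printf("before: %d\n", ia32_enabled());
            ia32_disable();                 /* e.g. when running under SEV */
            printf("after:  %d\n", ia32_enabled());
            return 0;
    }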
+diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
+index 72184b0b2219e..fca710a93eb9c 100644
+--- a/arch/x86/include/asm/idtentry.h
++++ b/arch/x86/include/asm/idtentry.h
+@@ -569,6 +569,10 @@ DECLARE_IDTENTRY_RAW(X86_TRAP_UD, exc_invalid_op);
+ DECLARE_IDTENTRY_RAW(X86_TRAP_BP, exc_int3);
+ DECLARE_IDTENTRY_RAW_ERRORCODE(X86_TRAP_PF, exc_page_fault);
+
++#if defined(CONFIG_IA32_EMULATION)
++DECLARE_IDTENTRY_RAW(IA32_SYSCALL_VECTOR, int80_emulation);
++#endif
++
+ #ifdef CONFIG_X86_MCE
+ #ifdef CONFIG_X86_64
+ DECLARE_IDTENTRY_MCE(X86_TRAP_MC, exc_machine_check);
+diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h
+index 12ef86b19910d..84294b66b9162 100644
+--- a/arch/x86/include/asm/proto.h
++++ b/arch/x86/include/asm/proto.h
+@@ -32,10 +32,6 @@ void entry_SYSCALL_compat(void);
+ void entry_SYSCALL_compat_safe_stack(void);
+ void entry_SYSRETL_compat_unsafe_stack(void);
+ void entry_SYSRETL_compat_end(void);
+-void entry_INT80_compat(void);
+-#ifdef CONFIG_XEN_PV
+-void xen_entry_INT80_compat(void);
+-#endif
+ #endif
+
+ void x86_configure_nx(void);
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index b66960358381b..c1d09c8844d67 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -1291,6 +1291,9 @@ static void zenbleed_check_cpu(void *unused)
+
+ void amd_check_microcode(void)
+ {
++ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
++ return;
++
+ on_each_cpu(zenbleed_check_cpu, NULL, 1);
+ }
+
+diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
+index a58c6bc1cd68c..f5a3374e62cb1 100644
+--- a/arch/x86/kernel/idt.c
++++ b/arch/x86/kernel/idt.c
+@@ -117,7 +117,7 @@ static const __initconst struct idt_data def_idts[] = {
+
+ SYSG(X86_TRAP_OF, asm_exc_overflow),
+ #if defined(CONFIG_IA32_EMULATION)
+- SYSG(IA32_SYSCALL_VECTOR, entry_INT80_compat),
++ SYSG(IA32_SYSCALL_VECTOR, asm_int80_emulation),
+ #elif defined(CONFIG_X86_32)
+ SYSG(IA32_SYSCALL_VECTOR, entry_INT80_32),
+ #endif
+diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
+index 68b2a9d3dbc6b..c8dfb0fdde7f9 100644
+--- a/arch/x86/kernel/sev.c
++++ b/arch/x86/kernel/sev.c
+@@ -1279,10 +1279,6 @@ void setup_ghcb(void)
+ if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
+ return;
+
+- /* First make sure the hypervisor talks a supported protocol. */
+- if (!sev_es_negotiate_protocol())
+- sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
+-
+ /*
+ * Check whether the runtime #VC exception handler is active. It uses
+ * the per-CPU GHCB page which is set up by sev_es_init_vc_handling().
+@@ -1297,6 +1293,13 @@ void setup_ghcb(void)
+ return;
+ }
+
++ /*
++ * Make sure the hypervisor talks a supported protocol.
++ * This gets called only in the BSP boot phase.
++ */
++ if (!sev_es_negotiate_protocol())
++ sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
++
+ /*
+ * Clear the boot_ghcb. The first exception comes in before the bss
+ * section is cleared.
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 4194aa4c5f0e0..4a663812562db 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -1786,15 +1786,17 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+ bool old_paging = is_paging(vcpu);
+
+ #ifdef CONFIG_X86_64
+- if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) {
++ if (vcpu->arch.efer & EFER_LME) {
+ if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
+ vcpu->arch.efer |= EFER_LMA;
+- svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
++ if (!vcpu->arch.guest_state_protected)
++ svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
+ }
+
+ if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
+ vcpu->arch.efer &= ~EFER_LMA;
+- svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
++ if (!vcpu->arch.guest_state_protected)
++ svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
+ }
+ }
+ #endif
+diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
+index 3ea0f763540a4..3e93af083e037 100644
+--- a/arch/x86/mm/mem_encrypt_amd.c
++++ b/arch/x86/mm/mem_encrypt_amd.c
+@@ -34,6 +34,7 @@
+ #include <asm/msr.h>
+ #include <asm/cmdline.h>
+ #include <asm/sev.h>
++#include <asm/ia32.h>
+
+ #include "mm_internal.h"
+
+@@ -502,6 +503,16 @@ void __init sme_early_init(void)
+ x86_platform.guest.enc_status_change_finish = amd_enc_status_change_finish;
+ x86_platform.guest.enc_tlb_flush_required = amd_enc_tlb_flush_required;
+ x86_platform.guest.enc_cache_flush_required = amd_enc_cache_flush_required;
++
++ /*
++ * The VMM is capable of injecting interrupt 0x80 and triggering the
++ * compatibility syscall path.
++ *
++ * By default, the 32-bit emulation is disabled in order to ensure
++ * the safety of the VM.
++ */
++ if (sev_status & MSR_AMD64_SEV_ENABLED)
++ ia32_disable();
+ }
+
+ void __init mem_encrypt_free_decrypted_mem(void)
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index 333539bdbdaae..9280e15de3af5 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -623,7 +623,7 @@ static struct trap_array_entry trap_array[] = {
+ TRAP_ENTRY(exc_int3, false ),
+ TRAP_ENTRY(exc_overflow, false ),
+ #ifdef CONFIG_IA32_EMULATION
+- { entry_INT80_compat, xen_entry_INT80_compat, false },
++ TRAP_ENTRY(int80_emulation, false ),
+ #endif
+ TRAP_ENTRY(exc_page_fault, false ),
+ TRAP_ENTRY(exc_divide_error, false ),
+diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
+index 6b4fdf6b95422..dec5e03e7a2cf 100644
+--- a/arch/x86/xen/xen-asm.S
++++ b/arch/x86/xen/xen-asm.S
+@@ -156,7 +156,7 @@ xen_pv_trap asm_xenpv_exc_machine_check
+ #endif /* CONFIG_X86_MCE */
+ xen_pv_trap asm_exc_simd_coprocessor_error
+ #ifdef CONFIG_IA32_EMULATION
+-xen_pv_trap entry_INT80_compat
++xen_pv_trap asm_int80_emulation
+ #endif
+ xen_pv_trap asm_exc_xen_unknown_trap
+ xen_pv_trap asm_exc_xen_hypervisor_callback
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index a0e347f6f97eb..94154a849a3ea 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -1563,17 +1563,22 @@ static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
+ int err;
+ const struct iommu_ops *ops;
+
++ /* Serialise to make dev->iommu stable under our potential fwspec */
++ mutex_lock(&iommu_probe_device_lock);
+ /*
+ * If we already translated the fwspec there is nothing left to do,
+ * return the iommu_ops.
+ */
+ ops = acpi_iommu_fwspec_ops(dev);
+- if (ops)
++ if (ops) {
++ mutex_unlock(&iommu_probe_device_lock);
+ return ops;
++ }
+
+ err = iort_iommu_configure_id(dev, id_in);
+ if (err && err != -EPROBE_DEFER)
+ err = viot_iommu_configure(dev);
++ mutex_unlock(&iommu_probe_device_lock);
+
+ /*
+ * If we have reason to believe the IOMMU driver missed the initial
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index e4a6da81cd4b3..9cc3a2b1b4fc1 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -4788,6 +4788,7 @@ static void binder_release_work(struct binder_proc *proc,
+ "undelivered TRANSACTION_ERROR: %u\n",
+ e->cmd);
+ } break;
++ case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
+ case BINDER_WORK_TRANSACTION_COMPLETE: {
+ binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+ "undelivered TRANSACTION_COMPLETE\n");
+diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
+index 1c06781f71148..f3bd9f104bd12 100644
+--- a/drivers/base/devcoredump.c
++++ b/drivers/base/devcoredump.c
+@@ -363,6 +363,7 @@ void dev_coredumpm(struct device *dev, struct module *owner,
+ devcd->devcd_dev.class = &devcd_class;
+
+ mutex_lock(&devcd->mutex);
++ dev_set_uevent_suppress(&devcd->devcd_dev, true);
+ if (device_add(&devcd->devcd_dev))
+ goto put_device;
+
+@@ -377,6 +378,8 @@ void dev_coredumpm(struct device *dev, struct module *owner,
+ "devcoredump"))
+ dev_warn(dev, "devcoredump create_link failed\n");
+
++ dev_set_uevent_suppress(&devcd->devcd_dev, false);
++ kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD);
+ INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
+ schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT);
+ mutex_unlock(&devcd->mutex);
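The devcoredump change suppresses the uevent around device_add() and only emits KOBJ_ADD once the data attribute and symlinks exist, so udev never races against a half-constructed device. A generic sketch of that ordering pattern, with error handling elided (not the driver code itself):

#include <linux/device.h>
#include <linux/kobject.h>

static int register_quietly(struct device *dev)
{
	int ret;

	dev_set_uevent_suppress(dev, true);	/* hold back KOBJ_ADD */
	ret = device_add(dev);
	if (ret)
		return ret;

	/* ... create sysfs attributes and symlinks here ... */

	dev_set_uevent_suppress(dev, false);
	kobject_uevent(&dev->kobj, KOBJ_ADD);	/* now announce it */
	return 0;
}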
+diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
+index cf3fa998093de..4f3dd9316fb2f 100644
+--- a/drivers/base/regmap/regcache.c
++++ b/drivers/base/regmap/regcache.c
+@@ -410,8 +410,7 @@ out:
+ rb_entry(node, struct regmap_range_node, node);
+
+ /* If there's nothing in the cache there's nothing to sync */
+- ret = regcache_read(map, this->selector_reg, &i);
+- if (ret != 0)
++ if (regcache_read(map, this->selector_reg, &i) != 0)
+ continue;
+
+ ret = _regmap_write(map, this->selector_reg, i);
+diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
+index cd27bf173dec8..a64648682c72c 100644
+--- a/drivers/gpio/gpiolib-sysfs.c
++++ b/drivers/gpio/gpiolib-sysfs.c
+@@ -463,14 +463,17 @@ static ssize_t export_store(struct class *class,
+ goto done;
+
+ status = gpiod_set_transitory(desc, false);
+- if (!status) {
+- status = gpiod_export(desc, true);
+- if (status < 0)
+- gpiod_free(desc);
+- else
+- set_bit(FLAG_SYSFS, &desc->flags);
++ if (status) {
++ gpiod_free(desc);
++ goto done;
+ }
+
++ status = gpiod_export(desc, true);
++ if (status < 0)
++ gpiod_free(desc);
++ else
++ set_bit(FLAG_SYSFS, &desc->flags);
++
+ done:
+ if (status)
+ pr_debug("%s: status %d\n", __func__, status);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index ced4e7e8f98b5..133e4e03c143c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -201,7 +201,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
+ }
+
+ for (i = 0; i < p->nchunks; i++) {
+- struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
++ struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL;
+ struct drm_amdgpu_cs_chunk user_chunk;
+ uint32_t __user *cdata;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+index 2fced451f0aea..aabde6ebb190e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+@@ -90,7 +90,7 @@ static void amdgpu_display_flip_work_func(struct work_struct *__work)
+
+ struct drm_crtc *crtc = &amdgpu_crtc->base;
+ unsigned long flags;
+- unsigned i;
++ unsigned int i;
+ int vpos, hpos;
+
+ for (i = 0; i < work->shared_count; ++i)
+@@ -167,7 +167,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
+ u64 tiling_flags;
+ int i, r;
+
+- work = kzalloc(sizeof *work, GFP_KERNEL);
++ work = kzalloc(sizeof(*work), GFP_KERNEL);
+ if (work == NULL)
+ return -ENOMEM;
+
+@@ -298,18 +298,17 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
+
+ adev = drm_to_adev(dev);
+ /* if we have active crtcs and we don't have a power ref,
+- take the current one */
++ * take the current one
++ */
+ if (active && !adev->have_disp_power_ref) {
+ adev->have_disp_power_ref = true;
+ return ret;
+ }
+- /* if we have no active crtcs, then drop the power ref
+- we got before */
+- if (!active && adev->have_disp_power_ref) {
+- pm_runtime_put_autosuspend(dev->dev);
++ /* if we have no active crtcs, then go to
++ * drop the power ref we got before
++ */
++ if (!active && adev->have_disp_power_ref)
+ adev->have_disp_power_ref = false;
+- }
+-
+ out:
+ /* drop the power reference we got coming in here */
+ pm_runtime_put_autosuspend(dev->dev);
+@@ -473,11 +472,10 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
+ if (amdgpu_connector->router.ddc_valid)
+ amdgpu_i2c_router_select_ddc_port(amdgpu_connector);
+
+- if (use_aux) {
++ if (use_aux)
+ ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2);
+- } else {
++ else
+ ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2);
+- }
+
+ if (ret != 2)
+ /* Couldn't find an accessible DDC on this connector */
+@@ -486,10 +484,12 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
+ * EDID header starts with:
+ * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
+ * Only the first 6 bytes must be valid as
+- * drm_edid_block_valid() can fix the last 2 bytes */
++ * drm_edid_block_valid() can fix the last 2 bytes
++ */
+ if (drm_edid_header_is_valid(buf) < 6) {
+ /* Couldn't find an accessible EDID on this
+- * connector */
++ * connector
++ */
+ return false;
+ }
+ return true;
+@@ -1204,8 +1204,10 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
+
+ obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
+ if (obj == NULL) {
+- drm_dbg_kms(dev, "No GEM object associated to handle 0x%08X, "
+- "can't create framebuffer\n", mode_cmd->handles[0]);
++ drm_dbg_kms(dev,
++ "No GEM object associated to handle 0x%08X, can't create framebuffer\n",
++ mode_cmd->handles[0]);
++
+ return ERR_PTR(-ENOENT);
+ }
+
+@@ -1398,6 +1400,7 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
+ }
+ if (amdgpu_crtc->rmx_type != RMX_OFF) {
+ fixed20_12 a, b;
++
+ a.full = dfixed_const(src_v);
+ b.full = dfixed_const(dst_v);
+ amdgpu_crtc->vsc.full = dfixed_div(a, b);
+@@ -1417,7 +1420,7 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
+ *
+ * \param dev Device to query.
+ * \param pipe Crtc to query.
+- * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
++ * \param flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
+ * For driver internal use only also supports these flags:
+ *
+ * USE_REAL_VBLANKSTART to use the real start of vblank instead
+@@ -1493,8 +1496,8 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
+
+ /* Called from driver internal vblank counter query code? */
+ if (flags & GET_DISTANCE_TO_VBLANKSTART) {
+- /* Caller wants distance from real vbl_start in *hpos */
+- *hpos = *vpos - vbl_start;
++ /* Caller wants distance from real vbl_start in *hpos */
++ *hpos = *vpos - vbl_start;
+ }
+
+ /* Fudge vblank to start a few scanlines earlier to handle the
+@@ -1516,7 +1519,7 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
+
+ /* In vblank? */
+ if (in_vbl)
+- ret |= DRM_SCANOUTPOS_IN_VBLANK;
++ ret |= DRM_SCANOUTPOS_IN_VBLANK;
+
+ /* Called from driver internal vblank counter query code? */
+ if (flags & GET_DISTANCE_TO_VBLANKSTART) {
+@@ -1622,6 +1625,7 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
+
+ if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
+ struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
++
+ r = amdgpu_bo_reserve(aobj, true);
+ if (r == 0) {
+ amdgpu_bo_unpin(aobj);
+@@ -1629,9 +1633,9 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
+ }
+ }
+
+- if (fb == NULL || fb->obj[0] == NULL) {
++ if (!fb || !fb->obj[0])
+ continue;
+- }
++
+ robj = gem_to_amdgpu_bo(fb->obj[0]);
+ if (!amdgpu_display_robj_is_fb(adev, robj)) {
+ r = amdgpu_bo_reserve(robj, true);
+@@ -1658,6 +1662,7 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev)
+
+ if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
+ struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
++
+ r = amdgpu_bo_reserve(aobj, true);
+ if (r == 0) {
+ r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
+index 4d9eb0137f8c4..d6c4293829aab 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
+@@ -79,6 +79,8 @@
+ * That is, for an I2C EEPROM driver everything is controlled by
+ * the "eeprom_addr".
+ *
++ * See also top of amdgpu_ras_eeprom.c.
++ *
+ * P.S. If you need to write, lock and read the Identification Page,
+ * (M24M02-DR device only, which we do not use), change the "7" to
+ * "0xF" in the macro below, and let the client set bit 20 to 1 in
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+index 84c241b9a2a13..f5f747cfe90a1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+@@ -33,12 +33,29 @@
+
+ #include "amdgpu_reset.h"
+
+-#define EEPROM_I2C_MADDR_VEGA20 0x0
+-#define EEPROM_I2C_MADDR_ARCTURUS 0x40000
+-#define EEPROM_I2C_MADDR_ARCTURUS_D342 0x0
+-#define EEPROM_I2C_MADDR_SIENNA_CICHLID 0x0
+-#define EEPROM_I2C_MADDR_ALDEBARAN 0x0
+-#define EEPROM_I2C_MADDR_SMU_13_0_0 (0x54UL << 16)
++/* These are memory addresses as would be seen by one or more EEPROM
++ * chips strung on the I2C bus, usually by manipulating pins 1-3 of a
++ * set of EEPROM devices. They form a continuous memory space.
++ *
++ * The I2C device address includes the device type identifier, 1010b,
++ * which is a reserved value and indicates that this is an I2C EEPROM
++ * device. It also includes the top 3 bits of the 19 bit EEPROM memory
++ * address, namely bits 18, 17, and 16. This makes up the 7 bit
++ * address sent on the I2C bus with bit 0 being the direction bit,
++ * which is not represented here, and sent by the hardware directly.
++ *
++ * For instance,
++ * 50h = 1010000b => device type identifier 1010b, bits 18:16 = 000b, address 0.
++ * 54h = 1010100b => --"--, bits 18:16 = 100b, address 40000h.
++ * 56h = 1010110b => --"--, bits 18:16 = 110b, address 60000h.
++ * Depending on the size of the I2C EEPROM device(s), bits 18:16 may
++ * address memory in a device or a device on the I2C bus, depending on
++ * the status of pins 1-3. See top of amdgpu_eeprom.c.
++ *
++ * The RAS table lives either at address 0 or address 40000h of EEPROM.
++ */
++#define EEPROM_I2C_MADDR_0 0x0
++#define EEPROM_I2C_MADDR_4 0x40000
+
+ /*
+ * The 2 macros below represent the actual size in bytes that
+@@ -90,33 +107,23 @@
+
+ static bool __is_ras_eeprom_supported(struct amdgpu_device *adev)
+ {
+- return adev->asic_type == CHIP_VEGA20 ||
+- adev->asic_type == CHIP_ARCTURUS ||
+- adev->asic_type == CHIP_SIENNA_CICHLID ||
+- adev->asic_type == CHIP_ALDEBARAN;
+-}
+-
+-static bool __get_eeprom_i2c_addr_arct(struct amdgpu_device *adev,
+- struct amdgpu_ras_eeprom_control *control)
+-{
+- struct atom_context *atom_ctx = adev->mode_info.atom_context;
+-
+- if (!control || !atom_ctx)
++ switch (adev->ip_versions[MP1_HWIP][0]) {
++ case IP_VERSION(11, 0, 2): /* VEGA20 and ARCTURUS */
++ case IP_VERSION(11, 0, 7): /* Sienna cichlid */
++ case IP_VERSION(13, 0, 0):
++ case IP_VERSION(13, 0, 2): /* Aldebaran */
++ case IP_VERSION(13, 0, 6):
++ case IP_VERSION(13, 0, 10):
++ return true;
++ default:
+ return false;
+-
+- if (strnstr(atom_ctx->vbios_version,
+- "D342",
+- sizeof(atom_ctx->vbios_version)))
+- control->i2c_address = EEPROM_I2C_MADDR_ARCTURUS_D342;
+- else
+- control->i2c_address = EEPROM_I2C_MADDR_ARCTURUS;
+-
+- return true;
++ }
+ }
+
+ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
+ struct amdgpu_ras_eeprom_control *control)
+ {
++ struct atom_context *atom_ctx = adev->mode_info.atom_context;
+ u8 i2c_addr;
+
+ if (!control)
+@@ -137,36 +144,42 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
+ return true;
+ }
+
+- switch (adev->asic_type) {
+- case CHIP_VEGA20:
+- control->i2c_address = EEPROM_I2C_MADDR_VEGA20;
+- break;
+-
+- case CHIP_ARCTURUS:
+- return __get_eeprom_i2c_addr_arct(adev, control);
+-
+- case CHIP_SIENNA_CICHLID:
+- control->i2c_address = EEPROM_I2C_MADDR_SIENNA_CICHLID;
+- break;
+-
+- case CHIP_ALDEBARAN:
+- control->i2c_address = EEPROM_I2C_MADDR_ALDEBARAN;
+- break;
+-
+- default:
+- return false;
+- }
+-
+ switch (adev->ip_versions[MP1_HWIP][0]) {
++ case IP_VERSION(11, 0, 2):
++ /* VEGA20 and ARCTURUS */
++ if (adev->asic_type == CHIP_VEGA20)
++ control->i2c_address = EEPROM_I2C_MADDR_0;
++ else if (strnstr(atom_ctx->vbios_version,
++ "D342",
++ sizeof(atom_ctx->vbios_version)))
++ control->i2c_address = EEPROM_I2C_MADDR_0;
++ else
++ control->i2c_address = EEPROM_I2C_MADDR_4;
++ return true;
++ case IP_VERSION(11, 0, 7):
++ control->i2c_address = EEPROM_I2C_MADDR_0;
++ return true;
++ case IP_VERSION(13, 0, 2):
++ if (strnstr(atom_ctx->vbios_version, "D673",
++ sizeof(atom_ctx->vbios_version)))
++ control->i2c_address = EEPROM_I2C_MADDR_4;
++ else
++ control->i2c_address = EEPROM_I2C_MADDR_0;
++ return true;
+ case IP_VERSION(13, 0, 0):
+- control->i2c_address = EEPROM_I2C_MADDR_SMU_13_0_0;
+- break;
+-
++ if (strnstr(atom_ctx->vbios_pn, "D707",
++ sizeof(atom_ctx->vbios_pn)))
++ control->i2c_address = EEPROM_I2C_MADDR_0;
++ else
++ control->i2c_address = EEPROM_I2C_MADDR_4;
++ return true;
++ case IP_VERSION(13, 0, 6):
++ case IP_VERSION(13, 0, 10):
++ control->i2c_address = EEPROM_I2C_MADDR_4;
++ return true;
+ default:
+- break;
++ return false;
+ }
+-
+- return true;
+ }
+
+ static void
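The new comment block above explains how bits 18:16 of the 19-bit EEPROM memory address fold into the 7-bit I2C device address (type code 1010b). A standalone illustration written for this note, not driver code, that reproduces the 50h/54h/56h examples:

#include <stdio.h>
#include <stdint.h>

/* 7-bit I2C address = 1010b type code | bits 18:16 of the 19-bit
 * EEPROM memory address, matching the comment's worked examples.
 */
static unsigned int eeprom_i2c_addr(uint32_t maddr)
{
	return 0x50 | ((maddr >> 16) & 0x7);
}

int main(void)
{
	printf("maddr 0x00000 -> 0x%02x\n", eeprom_i2c_addr(0x00000)); /* 0x50 */
	printf("maddr 0x40000 -> 0x%02x\n", eeprom_i2c_addr(0x40000)); /* 0x54 */
	printf("maddr 0x60000 -> 0x%02x\n", eeprom_i2c_addr(0x60000)); /* 0x56 */
	return 0;
}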
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+index 23e7e5126eae6..66a6f7a37ebcf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+@@ -397,7 +397,7 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
+ cpu_ptr = &adev->wb.wb[index];
+
+- r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
++ r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err1;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 7f0b18b0d4c48..71ef25425c7f6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -883,8 +883,8 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
+ memset(&ib, 0, sizeof(ib));
+- r = amdgpu_ib_get(adev, NULL, 16,
+- AMDGPU_IB_POOL_DIRECT, &ib);
++
++ r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r)
+ goto err1;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index fe371022e5104..84ca601f7d5f3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -1034,8 +1034,8 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
+ memset(&ib, 0, sizeof(ib));
+- r = amdgpu_ib_get(adev, NULL, 16,
+- AMDGPU_IB_POOL_DIRECT, &ib);
++
++ r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r)
+ goto err1;
+
+diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
+index 57946d80b02db..12baf9ba03c96 100644
+--- a/drivers/gpu/drm/bridge/Kconfig
++++ b/drivers/gpu/drm/bridge/Kconfig
+@@ -309,6 +309,7 @@ config DRM_TOSHIBA_TC358768
+ select REGMAP_I2C
+ select DRM_PANEL
+ select DRM_MIPI_DSI
++ select VIDEOMODE_HELPERS
+ help
+ Toshiba TC358768AXBG/TC358778XBG DSI bridge chip driver.
+
+diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
+index 8219310025de5..f7422f0cf579d 100644
+--- a/drivers/gpu/drm/i915/display/icl_dsi.c
++++ b/drivers/gpu/drm/i915/display/icl_dsi.c
+@@ -1500,6 +1500,13 @@ static void gen11_dsi_post_disable(struct intel_atomic_state *state,
+ static enum drm_mode_status gen11_dsi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+ {
++ struct drm_i915_private *i915 = to_i915(connector->dev);
++ enum drm_mode_status status;
++
++ status = intel_cpu_transcoder_mode_valid(i915, mode);
++ if (status != MODE_OK)
++ return status;
++
+ /* FIXME: DSC? */
+ return intel_dsi_mode_valid(connector, mode);
+ }
+diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
+index 4a8ff2f976085..e60b2cf84b851 100644
+--- a/drivers/gpu/drm/i915/display/intel_crt.c
++++ b/drivers/gpu/drm/i915/display/intel_crt.c
+@@ -343,8 +343,13 @@ intel_crt_mode_valid(struct drm_connector *connector,
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int max_dotclk = dev_priv->max_dotclk_freq;
++ enum drm_mode_status status;
+ int max_clock;
+
++ status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
++ if (status != MODE_OK)
++ return status;
++
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
+index 96e679a176e94..1777a12f2f421 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -8229,6 +8229,16 @@ intel_mode_valid(struct drm_device *dev,
+ mode->vtotal > vtotal_max)
+ return MODE_V_ILLEGAL;
+
++ return MODE_OK;
++}
++
++enum drm_mode_status intel_cpu_transcoder_mode_valid(struct drm_i915_private *dev_priv,
++ const struct drm_display_mode *mode)
++{
++ /*
++ * Additional transcoder timing limits,
++ * excluding BXT/GLK DSI transcoders.
++ */
+ if (DISPLAY_VER(dev_priv) >= 5) {
+ if (mode->hdisplay < 64 ||
+ mode->htotal - mode->hdisplay < 32)
+diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
+index 884e8e67b17c7..b4f941674357b 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.h
++++ b/drivers/gpu/drm/i915/display/intel_display.h
+@@ -554,6 +554,9 @@ enum drm_mode_status
+ intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
+ const struct drm_display_mode *mode,
+ bool bigjoiner);
++enum drm_mode_status
++intel_cpu_transcoder_mode_valid(struct drm_i915_private *i915,
++ const struct drm_display_mode *mode);
+ enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
+ bool is_trans_port_sync_mode(const struct intel_crtc_state *state);
+ bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state);
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index 594ea037050a9..5970f4149090f 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -973,8 +973,9 @@ intel_dp_mode_valid(struct drm_connector *_connector,
+ enum drm_mode_status status;
+ bool dsc = false, bigjoiner = false;
+
+- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+- return MODE_NO_DBLESCAN;
++ status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
++ if (status != MODE_OK)
++ return status;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ return MODE_H_ILLEGAL;
+diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
+index 9a6822256ddf6..eec32f682012c 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
++++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
+@@ -703,6 +703,10 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
+ return 0;
+ }
+
++ *status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
++ if (*status != MODE_OK)
++ return 0;
++
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
+ *status = MODE_NO_DBLESCAN;
+ return 0;
+diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
+index 5572e43026e4d..511c589070087 100644
+--- a/drivers/gpu/drm/i915/display/intel_dvo.c
++++ b/drivers/gpu/drm/i915/display/intel_dvo.c
+@@ -225,10 +225,16 @@ intel_dvo_mode_valid(struct drm_connector *connector,
+ {
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(intel_connector);
++ struct drm_i915_private *i915 = to_i915(intel_connector->base.dev);
+ const struct drm_display_mode *fixed_mode =
+ intel_panel_fixed_mode(intel_connector, mode);
+ int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ int target_clock = mode->clock;
++ enum drm_mode_status status;
++
++ status = intel_cpu_transcoder_mode_valid(i915, mode);
++ if (status != MODE_OK)
++ return status;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
+index 7816b2a33feeb..2600019fc8b96 100644
+--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
+@@ -1987,8 +1987,9 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
+ bool has_hdmi_sink = intel_has_hdmi_sink(hdmi, connector->state);
+ bool ycbcr_420_only;
+
+- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+- return MODE_NO_DBLESCAN;
++ status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
++ if (status != MODE_OK)
++ return status;
+
+ if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
+ clock *= 2;
+diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
+index a749a5a66d624..40b5d3d3c7e14 100644
+--- a/drivers/gpu/drm/i915/display/intel_lvds.c
++++ b/drivers/gpu/drm/i915/display/intel_lvds.c
+@@ -92,9 +92,9 @@ bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+
+ /* asserts want to know the pipe even if the port is disabled */
+ if (HAS_PCH_CPT(dev_priv))
+- *pipe = (val & LVDS_PIPE_SEL_MASK_CPT) >> LVDS_PIPE_SEL_SHIFT_CPT;
++ *pipe = REG_FIELD_GET(LVDS_PIPE_SEL_MASK_CPT, val);
+ else
+- *pipe = (val & LVDS_PIPE_SEL_MASK) >> LVDS_PIPE_SEL_SHIFT;
++ *pipe = REG_FIELD_GET(LVDS_PIPE_SEL_MASK, val);
+
+ return val & LVDS_PORT_EN;
+ }
+@@ -389,11 +389,16 @@ intel_lvds_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+ {
+ struct intel_connector *intel_connector = to_intel_connector(connector);
++ struct drm_i915_private *i915 = to_i915(intel_connector->base.dev);
+ const struct drm_display_mode *fixed_mode =
+ intel_panel_fixed_mode(intel_connector, mode);
+ int max_pixclk = to_i915(connector->dev)->max_dotclk_freq;
+ enum drm_mode_status status;
+
++ status = intel_cpu_transcoder_mode_valid(i915, mode);
++ if (status != MODE_OK)
++ return status;
++
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
+index 2c2e0f041f869..8294dddfd9de8 100644
+--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
+@@ -115,7 +115,6 @@ struct intel_sdvo {
+
+ enum port port;
+
+- bool has_hdmi_monitor;
+ bool has_hdmi_audio;
+
+ /* DDC bus used by this SDVO encoder */
+@@ -1278,10 +1277,13 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config)
+ pipe_config->clock_set = true;
+ }
+
+-static bool intel_has_hdmi_sink(struct intel_sdvo *sdvo,
++static bool intel_has_hdmi_sink(struct intel_sdvo_connector *intel_sdvo_connector,
+ const struct drm_connector_state *conn_state)
+ {
+- return sdvo->has_hdmi_monitor &&
++ struct drm_connector *connector = conn_state->connector;
++
++ return intel_sdvo_connector->is_hdmi &&
++ connector->display_info.is_hdmi &&
+ READ_ONCE(to_intel_digital_connector_state(conn_state)->force_audio) != HDMI_AUDIO_OFF_DVI;
+ }
+
+@@ -1360,7 +1362,7 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
+ pipe_config->pixel_multiplier =
+ intel_sdvo_get_pixel_multiplier(adjusted_mode);
+
+- pipe_config->has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo, conn_state);
++ pipe_config->has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo_connector, conn_state);
+
+ if (pipe_config->has_hdmi_sink) {
+ if (intel_sdvo_state->base.force_audio == HDMI_AUDIO_AUTO)
+@@ -1871,13 +1873,19 @@ static enum drm_mode_status
+ intel_sdvo_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+ {
++ struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
+ struct intel_sdvo_connector *intel_sdvo_connector =
+ to_intel_sdvo_connector(connector);
+- int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+- bool has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo, connector->state);
++ bool has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo_connector, connector->state);
++ int max_dotclk = i915->max_dotclk_freq;
++ enum drm_mode_status status;
+ int clock = mode->clock;
+
++ status = intel_cpu_transcoder_mode_valid(i915, mode);
++ if (status != MODE_OK)
++ return status;
++
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+@@ -2064,7 +2072,6 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
+ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ status = connector_status_connected;
+ if (intel_sdvo_connector->is_hdmi) {
+- intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
+ intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
+ }
+ } else
+@@ -2116,7 +2123,6 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
+
+ intel_sdvo->attached_output = response;
+
+- intel_sdvo->has_hdmi_monitor = false;
+ intel_sdvo->has_hdmi_audio = false;
+
+ if ((intel_sdvo_connector->output_flag & response) == 0)
+diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
+index dcf89d701f0f6..fb25be800e753 100644
+--- a/drivers/gpu/drm/i915/display/intel_tv.c
++++ b/drivers/gpu/drm/i915/display/intel_tv.c
+@@ -956,8 +956,14 @@ static enum drm_mode_status
+ intel_tv_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+ {
++ struct drm_i915_private *i915 = to_i915(connector->dev);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
+- int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
++ int max_dotclk = i915->max_dotclk_freq;
++ enum drm_mode_status status;
++
++ status = intel_cpu_transcoder_mode_valid(i915, mode);
++ if (status != MODE_OK)
++ return status;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
+index 00c80f29ad999..114088ca59ed4 100644
+--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
++++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
+@@ -1627,9 +1627,25 @@ static const struct drm_encoder_funcs intel_dsi_funcs = {
+ .destroy = intel_dsi_encoder_destroy,
+ };
+
++static enum drm_mode_status vlv_dsi_mode_valid(struct drm_connector *connector,
++ struct drm_display_mode *mode)
++{
++ struct drm_i915_private *i915 = to_i915(connector->dev);
++
++ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
++ enum drm_mode_status status;
++
++ status = intel_cpu_transcoder_mode_valid(i915, mode);
++ if (status != MODE_OK)
++ return status;
++ }
++
++ return intel_dsi_mode_valid(connector, mode);
++}
++
+ static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
+ .get_modes = intel_dsi_get_modes,
+- .mode_valid = intel_dsi_mode_valid,
++ .mode_valid = vlv_dsi_mode_valid,
+ .atomic_check = intel_digital_connector_atomic_check,
+ };
+
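Across the i915 hunks above, every connector's .mode_valid hook now funnels through intel_cpu_transcoder_mode_valid() before its own checks; the call sites are near-identical. A condensed sketch of the shared shape (foo_mode_valid is a placeholder, not any one connector verbatim):

static enum drm_mode_status
foo_mode_valid(struct drm_connector *connector,
	       struct drm_display_mode *mode)
{
	struct drm_i915_private *i915 = to_i915(connector->dev);
	enum drm_mode_status status;

	/* transcoder-wide timing limits first ... */
	status = intel_cpu_transcoder_mode_valid(i915, mode);
	if (status != MODE_OK)
		return status;

	/* ... then connector-specific constraints as before */
	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	return MODE_OK;
}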
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 25015996f627a..c6766704340eb 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -2681,52 +2681,50 @@
+ * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
+ * the DPLL semantics change when the LVDS is assigned to that pipe.
+ */
+-#define LVDS_PORT_EN (1 << 31)
++#define LVDS_PORT_EN REG_BIT(31)
+ /* Selects pipe B for LVDS data. Must be set on pre-965. */
+-#define LVDS_PIPE_SEL_SHIFT 30
+-#define LVDS_PIPE_SEL_MASK (1 << 30)
+-#define LVDS_PIPE_SEL(pipe) ((pipe) << 30)
+-#define LVDS_PIPE_SEL_SHIFT_CPT 29
+-#define LVDS_PIPE_SEL_MASK_CPT (3 << 29)
+-#define LVDS_PIPE_SEL_CPT(pipe) ((pipe) << 29)
++#define LVDS_PIPE_SEL_MASK REG_BIT(30)
++#define LVDS_PIPE_SEL(pipe) REG_FIELD_PREP(LVDS_PIPE_SEL_MASK, (pipe))
++#define LVDS_PIPE_SEL_MASK_CPT REG_GENMASK(30, 29)
++#define LVDS_PIPE_SEL_CPT(pipe) REG_FIELD_PREP(LVDS_PIPE_SEL_MASK_CPT, (pipe))
+ /* LVDS dithering flag on 965/g4x platform */
+-#define LVDS_ENABLE_DITHER (1 << 25)
++#define LVDS_ENABLE_DITHER REG_BIT(25)
+ /* LVDS sync polarity flags. Set to invert (i.e. negative) */
+-#define LVDS_VSYNC_POLARITY (1 << 21)
+-#define LVDS_HSYNC_POLARITY (1 << 20)
++#define LVDS_VSYNC_POLARITY REG_BIT(21)
++#define LVDS_HSYNC_POLARITY REG_BIT(20)
+
+ /* Enable border for unscaled (or aspect-scaled) display */
+-#define LVDS_BORDER_ENABLE (1 << 15)
++#define LVDS_BORDER_ENABLE REG_BIT(15)
+ /*
+ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
+ * pixel.
+ */
+-#define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
+-#define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
+-#define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
++#define LVDS_A0A2_CLKA_POWER_MASK REG_GENMASK(9, 8)
++#define LVDS_A0A2_CLKA_POWER_DOWN REG_FIELD_PREP(LVDS_A0A2_CLKA_POWER_MASK, 0)
++#define LVDS_A0A2_CLKA_POWER_UP REG_FIELD_PREP(LVDS_A0A2_CLKA_POWER_MASK, 3)
+ /*
+ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
+ * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
+ * on.
+ */
+-#define LVDS_A3_POWER_MASK (3 << 6)
+-#define LVDS_A3_POWER_DOWN (0 << 6)
+-#define LVDS_A3_POWER_UP (3 << 6)
++#define LVDS_A3_POWER_MASK REG_GENMASK(7, 6)
++#define LVDS_A3_POWER_DOWN REG_FIELD_PREP(LVDS_A3_POWER_MASK, 0)
++#define LVDS_A3_POWER_UP REG_FIELD_PREP(LVDS_A3_POWER_MASK, 3)
+ /*
+ * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
+ * is set.
+ */
+-#define LVDS_CLKB_POWER_MASK (3 << 4)
+-#define LVDS_CLKB_POWER_DOWN (0 << 4)
+-#define LVDS_CLKB_POWER_UP (3 << 4)
++#define LVDS_CLKB_POWER_MASK REG_GENMASK(5, 4)
++#define LVDS_CLKB_POWER_DOWN REG_FIELD_PREP(LVDS_CLKB_POWER_MASK, 0)
++#define LVDS_CLKB_POWER_UP REG_FIELD_PREP(LVDS_CLKB_POWER_MASK, 3)
+ /*
+ * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
+ * setting for whether we are in dual-channel mode. The B3 pair will
+ * additionally only be powered up when LVDS_A3_POWER_UP is set.
+ */
+-#define LVDS_B0B3_POWER_MASK (3 << 2)
+-#define LVDS_B0B3_POWER_DOWN (0 << 2)
+-#define LVDS_B0B3_POWER_UP (3 << 2)
++#define LVDS_B0B3_POWER_MASK REG_GENMASK(3, 2)
++#define LVDS_B0B3_POWER_DOWN REG_FIELD_PREP(LVDS_B0B3_POWER_MASK, 0)
++#define LVDS_B0B3_POWER_UP REG_FIELD_PREP(LVDS_B0B3_POWER_MASK, 3)
+
+ /* Video Data Island Packet control */
+ #define VIDEO_DIP_DATA _MMIO(0x61178)
+@@ -6461,7 +6459,7 @@
+ #define FDI_PLL_CTL_2 _MMIO(0xfe004)
+
+ #define PCH_LVDS _MMIO(0xe1180)
+-#define LVDS_DETECTED (1 << 1)
++#define LVDS_DETECTED REG_BIT(1)
+
+ #define _PCH_DP_B 0xe4100
+ #define PCH_DP_B _MMIO(_PCH_DP_B)
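The i915_reg.h hunk converts hand-rolled shift/mask pairs to REG_BIT()/REG_GENMASK()/REG_FIELD_PREP(), and the LVDS pipe readout earlier switches to REG_FIELD_GET(). The payoff is that the mask alone encodes the field position, so callers never repeat the shift. An illustrative comparison, assuming the i915 REG_* helpers:

#define OLD_PIPE_SEL_MASK_CPT	(3 << 29)
#define OLD_PIPE_SEL_SHIFT_CPT	29

static u32 pipe_old(u32 val)
{
	return (val & OLD_PIPE_SEL_MASK_CPT) >> OLD_PIPE_SEL_SHIFT_CPT;
}

static u32 pipe_new(u32 val)
{
	/* shift derived from the mask at compile time; same bits 30:29 */
	return REG_FIELD_GET(LVDS_PIPE_SEL_MASK_CPT, val);
}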
+diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
+index 0962c12eba5a0..2147afb725581 100644
+--- a/drivers/hwmon/acpi_power_meter.c
++++ b/drivers/hwmon/acpi_power_meter.c
+@@ -31,6 +31,7 @@
+ #define POWER_METER_CAN_NOTIFY (1 << 3)
+ #define POWER_METER_IS_BATTERY (1 << 8)
+ #define UNKNOWN_HYSTERESIS 0xFFFFFFFF
++#define UNKNOWN_POWER 0xFFFFFFFF
+
+ #define METER_NOTIFY_CONFIG 0x80
+ #define METER_NOTIFY_TRIP 0x81
+@@ -348,6 +349,9 @@ static ssize_t show_power(struct device *dev,
+ update_meter(resource);
+ mutex_unlock(&resource->lock);
+
++ if (resource->power == UNKNOWN_POWER)
++ return -ENODATA;
++
+ return sprintf(buf, "%llu\n", resource->power * 1000);
+ }
+
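The show_power() change treats the firmware's all-ones reading as "no data" instead of scaling it to microwatts. The shape of the check, as a sketch with hypothetical names:

#define UNKNOWN_POWER 0xFFFFFFFF

static ssize_t format_power(u64 raw, char *buf)
{
	/* 0xFFFFFFFF would otherwise render as 4294967295000 uW */
	if (raw == UNKNOWN_POWER)
		return -ENODATA;
	return sprintf(buf, "%llu\n", raw * 1000);
}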
+diff --git a/drivers/hwmon/nzxt-kraken2.c b/drivers/hwmon/nzxt-kraken2.c
+index 89f7ea4f42d47..badbcaf01f90b 100644
+--- a/drivers/hwmon/nzxt-kraken2.c
++++ b/drivers/hwmon/nzxt-kraken2.c
+@@ -161,13 +161,13 @@ static int kraken2_probe(struct hid_device *hdev,
+ ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+ if (ret) {
+ hid_err(hdev, "hid hw start failed with %d\n", ret);
+- goto fail_and_stop;
++ return ret;
+ }
+
+ ret = hid_hw_open(hdev);
+ if (ret) {
+ hid_err(hdev, "hid hw open failed with %d\n", ret);
+- goto fail_and_close;
++ goto fail_and_stop;
+ }
+
+ priv->hwmon_dev = hwmon_device_register_with_info(&hdev->dev, "kraken2",
+diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
+index 1cf7478da6ee8..fda48a0afc1a5 100644
+--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
++++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
+@@ -2072,7 +2072,7 @@ static void clear_etmdrvdata(void *info)
+ etmdrvdata[cpu] = NULL;
+ }
+
+-static int __exit etm4_remove_dev(struct etmv4_drvdata *drvdata)
++static void etm4_remove_dev(struct etmv4_drvdata *drvdata)
+ {
+ etm_perf_symlink(drvdata->csdev, false);
+ /*
+@@ -2094,10 +2094,9 @@ static int __exit etm4_remove_dev(struct etmv4_drvdata *drvdata)
+ cscfg_unregister_csdev(drvdata->csdev);
+ coresight_unregister(drvdata->csdev);
+
+- return 0;
+ }
+
+-static void __exit etm4_remove_amba(struct amba_device *adev)
++static void etm4_remove_amba(struct amba_device *adev)
+ {
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(&adev->dev);
+
+@@ -2105,15 +2104,14 @@ static void __exit etm4_remove_amba(struct amba_device *adev)
+ etm4_remove_dev(drvdata);
+ }
+
+-static int __exit etm4_remove_platform_dev(struct platform_device *pdev)
++static int etm4_remove_platform_dev(struct platform_device *pdev)
+ {
+- int ret = 0;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(&pdev->dev);
+
+ if (drvdata)
+- ret = etm4_remove_dev(drvdata);
++ etm4_remove_dev(drvdata);
+ pm_runtime_disable(&pdev->dev);
+- return ret;
++ return 0;
+ }
+
+ static const struct amba_id etm4_ids[] = {
+diff --git a/drivers/hwtracing/ptt/hisi_ptt.c b/drivers/hwtracing/ptt/hisi_ptt.c
+index 4140efd664097..016220ba0addd 100644
+--- a/drivers/hwtracing/ptt/hisi_ptt.c
++++ b/drivers/hwtracing/ptt/hisi_ptt.c
+@@ -837,6 +837,10 @@ static void hisi_ptt_pmu_del(struct perf_event *event, int flags)
+ hisi_ptt_pmu_stop(event, PERF_EF_UPDATE);
+ }
+
++static void hisi_ptt_pmu_read(struct perf_event *event)
++{
++}
++
+ static void hisi_ptt_remove_cpuhp_instance(void *hotplug_node)
+ {
+ cpuhp_state_remove_instance_nocalls(hisi_ptt_pmu_online, hotplug_node);
+@@ -880,6 +884,7 @@ static int hisi_ptt_register_pmu(struct hisi_ptt *hisi_ptt)
+ .stop = hisi_ptt_pmu_stop,
+ .add = hisi_ptt_pmu_add,
+ .del = hisi_ptt_pmu_del,
++ .read = hisi_ptt_pmu_read,
+ };
+
+ reg = readl(hisi_ptt->iobase + HISI_PTT_LOCATION);
+diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
+index 6fdb25a5f8016..ad98c85ec2e7a 100644
+--- a/drivers/i2c/busses/i2c-designware-common.c
++++ b/drivers/i2c/busses/i2c-designware-common.c
+@@ -63,7 +63,7 @@ static int dw_reg_read(void *context, unsigned int reg, unsigned int *val)
+ {
+ struct dw_i2c_dev *dev = context;
+
+- *val = readl_relaxed(dev->base + reg);
++ *val = readl(dev->base + reg);
+
+ return 0;
+ }
+@@ -72,7 +72,7 @@ static int dw_reg_write(void *context, unsigned int reg, unsigned int val)
+ {
+ struct dw_i2c_dev *dev = context;
+
+- writel_relaxed(val, dev->base + reg);
++ writel(val, dev->base + reg);
+
+ return 0;
+ }
+@@ -81,7 +81,7 @@ static int dw_reg_read_swab(void *context, unsigned int reg, unsigned int *val)
+ {
+ struct dw_i2c_dev *dev = context;
+
+- *val = swab32(readl_relaxed(dev->base + reg));
++ *val = swab32(readl(dev->base + reg));
+
+ return 0;
+ }
+@@ -90,7 +90,7 @@ static int dw_reg_write_swab(void *context, unsigned int reg, unsigned int val)
+ {
+ struct dw_i2c_dev *dev = context;
+
+- writel_relaxed(swab32(val), dev->base + reg);
++ writel(swab32(val), dev->base + reg);
+
+ return 0;
+ }
+@@ -99,8 +99,8 @@ static int dw_reg_read_word(void *context, unsigned int reg, unsigned int *val)
+ {
+ struct dw_i2c_dev *dev = context;
+
+- *val = readw_relaxed(dev->base + reg) |
+- (readw_relaxed(dev->base + reg + 2) << 16);
++ *val = readw(dev->base + reg) |
++ (readw(dev->base + reg + 2) << 16);
+
+ return 0;
+ }
+@@ -109,8 +109,8 @@ static int dw_reg_write_word(void *context, unsigned int reg, unsigned int val)
+ {
+ struct dw_i2c_dev *dev = context;
+
+- writew_relaxed(val, dev->base + reg);
+- writew_relaxed(val >> 16, dev->base + reg + 2);
++ writew(val, dev->base + reg);
++ writew(val >> 16, dev->base + reg + 2);
+
+ return 0;
+ }
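The designware conversion drops the _relaxed MMIO accessors. Plain readl()/writel() include ordering guarantees against normal memory accesses that the relaxed variants leave out, which matters on weakly ordered systems. A sketch of the failure mode the strict accessors prevent (DOORBELL_REG and the function are hypothetical):

#include <linux/io.h>

#define DOORBELL_REG	0x10	/* hypothetical register offset */

static void ring_doorbell(void __iomem *regs, u32 *desc)
{
	desc[0] = 0x1;			/* payload in coherent memory */

	/* writel_relaxed(1, ...) would give no ordering vs. desc[0];
	 * the device could see the doorbell before the data.
	 */
	writel(1, regs + DOORBELL_REG);	/* implies the needed barrier */
}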
+diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
+index 957634eceba8f..8ce569bf7525e 100644
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -96,12 +96,6 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
+ return page_size;
+ }
+
+- /* rdma_for_each_block() has a bug if the page size is smaller than the
+- * page size used to build the umem. For now prevent smaller page sizes
+- * from being returned.
+- */
+- pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT);
+-
+ /* The best result is the smallest page size that results in the minimum
+ * number of required pages. Compute the largest page size that could
+ * work based on VA address bits that don't change.
+diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
+index e58893387bb4d..43d396a7d8e16 100644
+--- a/drivers/infiniband/hw/bnxt_re/main.c
++++ b/drivers/infiniband/hw/bnxt_re/main.c
+@@ -70,7 +70,7 @@ static char version[] =
+ BNXT_RE_DESC "\n";
+
+ MODULE_AUTHOR("Eddie Wai <eddie.wai@broadcom.com>");
+-MODULE_DESCRIPTION(BNXT_RE_DESC " Driver");
++MODULE_DESCRIPTION(BNXT_RE_DESC);
+ MODULE_LICENSE("Dual BSD/GPL");
+
+ /* globals */
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 8a9d28f81149a..c2ee80546d120 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -4913,10 +4913,15 @@ static int check_cong_type(struct ib_qp *ibqp,
+ cong_alg->wnd_mode_sel = WND_LIMIT;
+ break;
+ default:
+- ibdev_err(&hr_dev->ib_dev,
+- "error type(%u) for congestion selection.\n",
+- hr_dev->caps.cong_type);
+- return -EINVAL;
++ ibdev_warn(&hr_dev->ib_dev,
++ "invalid type(%u) for congestion selection.\n",
++ hr_dev->caps.cong_type);
++ hr_dev->caps.cong_type = CONG_TYPE_DCQCN;
++ cong_alg->alg_sel = CONG_DCQCN;
++ cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
++ cong_alg->dip_vld = DIP_INVALID;
++ cong_alg->wnd_mode_sel = WND_LIMIT;
++ break;
+ }
+
+ return 0;
+diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
+index c07ce85d243f1..311a1138e838d 100644
+--- a/drivers/infiniband/hw/irdma/hw.c
++++ b/drivers/infiniband/hw/irdma/hw.c
+@@ -322,7 +322,11 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
+ break;
+ case IRDMA_AE_QP_SUSPEND_COMPLETE:
+ if (iwqp->iwdev->vsi.tc_change_pending) {
+- atomic_dec(&iwqp->sc_qp.vsi->qp_suspend_reqs);
++ if (!atomic_dec_return(&qp->vsi->qp_suspend_reqs))
++ wake_up(&iwqp->iwdev->suspend_wq);
++ }
++ if (iwqp->suspend_pending) {
++ iwqp->suspend_pending = false;
+ wake_up(&iwqp->iwdev->suspend_wq);
+ }
+ break;
+@@ -568,16 +572,13 @@ static void irdma_destroy_irq(struct irdma_pci_f *rf,
+ * Issue destroy cqp request and
+ * free the resources associated with the cqp
+ */
+-static void irdma_destroy_cqp(struct irdma_pci_f *rf, bool free_hwcqp)
++static void irdma_destroy_cqp(struct irdma_pci_f *rf)
+ {
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_cqp *cqp = &rf->cqp;
+ int status = 0;
+
+- if (rf->cqp_cmpl_wq)
+- destroy_workqueue(rf->cqp_cmpl_wq);
+- if (free_hwcqp)
+- status = irdma_sc_cqp_destroy(dev->cqp);
++ status = irdma_sc_cqp_destroy(dev->cqp);
+ if (status)
+ ibdev_dbg(to_ibdev(dev), "ERR: Destroy CQP failed %d\n", status);
+
+@@ -741,6 +742,9 @@ static void irdma_destroy_ccq(struct irdma_pci_f *rf)
+ struct irdma_ccq *ccq = &rf->ccq;
+ int status = 0;
+
++ if (rf->cqp_cmpl_wq)
++ destroy_workqueue(rf->cqp_cmpl_wq);
++
+ if (!rf->reset)
+ status = irdma_sc_ccq_destroy(dev->ccq, 0, true);
+ if (status)
+@@ -921,8 +925,8 @@ static int irdma_create_cqp(struct irdma_pci_f *rf)
+
+ cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
+ if (!cqp->scratch_array) {
+- kfree(cqp->cqp_requests);
+- return -ENOMEM;
++ status = -ENOMEM;
++ goto err_scratch;
+ }
+
+ dev->cqp = &cqp->sc_cqp;
+@@ -932,15 +936,14 @@ static int irdma_create_cqp(struct irdma_pci_f *rf)
+ cqp->sq.va = dma_alloc_coherent(dev->hw->device, cqp->sq.size,
+ &cqp->sq.pa, GFP_KERNEL);
+ if (!cqp->sq.va) {
+- kfree(cqp->scratch_array);
+- kfree(cqp->cqp_requests);
+- return -ENOMEM;
++ status = -ENOMEM;
++ goto err_sq;
+ }
+
+ status = irdma_obj_aligned_mem(rf, &mem, sizeof(struct irdma_cqp_ctx),
+ IRDMA_HOST_CTX_ALIGNMENT_M);
+ if (status)
+- goto exit;
++ goto err_ctx;
+
+ dev->cqp->host_ctx_pa = mem.pa;
+ dev->cqp->host_ctx = mem.va;
+@@ -966,7 +969,7 @@ static int irdma_create_cqp(struct irdma_pci_f *rf)
+ status = irdma_sc_cqp_init(dev->cqp, &cqp_init_info);
+ if (status) {
+ ibdev_dbg(to_ibdev(dev), "ERR: cqp init status %d\n", status);
+- goto exit;
++ goto err_ctx;
+ }
+
+ spin_lock_init(&cqp->req_lock);
+@@ -977,7 +980,7 @@ static int irdma_create_cqp(struct irdma_pci_f *rf)
+ ibdev_dbg(to_ibdev(dev),
+ "ERR: cqp create failed - status %d maj_err %d min_err %d\n",
+ status, maj_err, min_err);
+- goto exit;
++ goto err_ctx;
+ }
+
+ INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
+@@ -991,8 +994,16 @@ static int irdma_create_cqp(struct irdma_pci_f *rf)
+ init_waitqueue_head(&cqp->remove_wq);
+ return 0;
+
+-exit:
+- irdma_destroy_cqp(rf, false);
++err_ctx:
++ dma_free_coherent(dev->hw->device, cqp->sq.size,
++ cqp->sq.va, cqp->sq.pa);
++ cqp->sq.va = NULL;
++err_sq:
++ kfree(cqp->scratch_array);
++ cqp->scratch_array = NULL;
++err_scratch:
++ kfree(cqp->cqp_requests);
++ cqp->cqp_requests = NULL;
+
+ return status;
+ }
+@@ -1159,7 +1170,6 @@ static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
+ int status;
+ struct irdma_ceq_init_info info = {};
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+- u64 scratch;
+ u32 ceq_size;
+
+ info.ceq_id = ceq_id;
+@@ -1180,14 +1190,13 @@ static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
+ iwceq->sc_ceq.ceq_id = ceq_id;
+ info.dev = dev;
+ info.vsi = vsi;
+- scratch = (uintptr_t)&rf->cqp.sc_cqp;
+ status = irdma_sc_ceq_init(&iwceq->sc_ceq, &info);
+ if (!status) {
+ if (dev->ceq_valid)
+ status = irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
+ IRDMA_OP_CEQ_CREATE);
+ else
+- status = irdma_sc_cceq_create(&iwceq->sc_ceq, scratch);
++ status = irdma_sc_cceq_create(&iwceq->sc_ceq, 0);
+ }
+
+ if (status) {
+@@ -1740,7 +1749,7 @@ void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf)
+ rf->reset, rf->rdma_ver);
+ fallthrough;
+ case CQP_CREATED:
+- irdma_destroy_cqp(rf, true);
++ irdma_destroy_cqp(rf);
+ fallthrough;
+ case INITIAL_STATE:
+ irdma_del_init_mem(rf);
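irdma_create_cqp() now unwinds through dedicated labels instead of calling irdma_destroy_cqp() on a half-built object: each label frees exactly what was allocated before the failure and NULLs the pointer. The general goto-unwind shape, as a self-contained sketch:

#include <linux/slab.h>

struct three { void *a; void *b; void *c; };

static int create_three(struct three *t)
{
	int ret = -ENOMEM;

	t->a = kzalloc(64, GFP_KERNEL);
	if (!t->a)
		goto err_a;
	t->b = kzalloc(64, GFP_KERNEL);
	if (!t->b)
		goto err_b;
	t->c = kzalloc(64, GFP_KERNEL);
	if (!t->c)
		goto err_c;
	return 0;

err_c:				/* free in reverse allocation order */
	kfree(t->b);
	t->b = NULL;
err_b:
	kfree(t->a);
	t->a = NULL;
err_a:
	return ret;
}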
+diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c
+index 514453777e07d..be1030d1adfaf 100644
+--- a/drivers/infiniband/hw/irdma/main.c
++++ b/drivers/infiniband/hw/irdma/main.c
+@@ -48,7 +48,7 @@ static void irdma_prep_tc_change(struct irdma_device *iwdev)
+ /* Wait for all qp's to suspend */
+ wait_event_timeout(iwdev->suspend_wq,
+ !atomic_read(&iwdev->vsi.qp_suspend_reqs),
+- IRDMA_EVENT_TIMEOUT);
++ msecs_to_jiffies(IRDMA_EVENT_TIMEOUT_MS));
+ irdma_ws_reset(&iwdev->vsi);
+ }
+
+diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
+index 9cbe64311f985..6a6b14d8fca45 100644
+--- a/drivers/infiniband/hw/irdma/main.h
++++ b/drivers/infiniband/hw/irdma/main.h
+@@ -78,7 +78,7 @@ extern struct auxiliary_driver i40iw_auxiliary_drv;
+
+ #define MAX_DPC_ITERATIONS 128
+
+-#define IRDMA_EVENT_TIMEOUT 50000
++#define IRDMA_EVENT_TIMEOUT_MS 5000
+ #define IRDMA_VCHNL_EVENT_TIMEOUT 100000
+ #define IRDMA_RST_TIMEOUT_HZ 4
+
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
+index 3b8b2341981ea..447e1bcc82a32 100644
+--- a/drivers/infiniband/hw/irdma/verbs.c
++++ b/drivers/infiniband/hw/irdma/verbs.c
+@@ -1098,6 +1098,21 @@ static int irdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
+ return 0;
+ }
+
++static int irdma_wait_for_suspend(struct irdma_qp *iwqp)
++{
++ if (!wait_event_timeout(iwqp->iwdev->suspend_wq,
++ !iwqp->suspend_pending,
++ msecs_to_jiffies(IRDMA_EVENT_TIMEOUT_MS))) {
++ iwqp->suspend_pending = false;
++ ibdev_warn(&iwqp->iwdev->ibdev,
++ "modify_qp timed out waiting for suspend. qp_id = %d, last_ae = 0x%x\n",
++ iwqp->ibqp.qp_num, iwqp->last_aeq);
++ return -EBUSY;
++ }
++
++ return 0;
++}
++
+ /**
+ * irdma_modify_qp_roce - modify qp request
+ * @ibqp: qp's pointer for modify
+@@ -1359,17 +1374,11 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+
+ info.next_iwarp_state = IRDMA_QP_STATE_SQD;
+ issue_modify_qp = 1;
++ iwqp->suspend_pending = true;
+ break;
+ case IB_QPS_SQE:
+ case IB_QPS_ERR:
+ case IB_QPS_RESET:
+- if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS) {
+- spin_unlock_irqrestore(&iwqp->lock, flags);
+- info.next_iwarp_state = IRDMA_QP_STATE_SQD;
+- irdma_hw_modify_qp(iwdev, iwqp, &info, true);
+- spin_lock_irqsave(&iwqp->lock, flags);
+- }
+-
+ if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ if (udata && udata->inlen) {
+@@ -1406,6 +1415,11 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ ctx_info->rem_endpoint_idx = udp_info->arp_idx;
+ if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
+ return -EINVAL;
++ if (info.next_iwarp_state == IRDMA_QP_STATE_SQD) {
++ ret = irdma_wait_for_suspend(iwqp);
++ if (ret)
++ return ret;
++ }
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (iwqp->iwarp_state == info.curr_iwarp_state) {
+ iwqp->iwarp_state = info.next_iwarp_state;
+diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
+index a536e9fa85ebf..9f9e273bbff3e 100644
+--- a/drivers/infiniband/hw/irdma/verbs.h
++++ b/drivers/infiniband/hw/irdma/verbs.h
+@@ -193,6 +193,7 @@ struct irdma_qp {
+ u8 flush_issued : 1;
+ u8 sig_all : 1;
+ u8 pau_mode : 1;
++ u8 suspend_pending : 1;
+ u8 rsvd : 1;
+ u8 iwarp_state;
+ u16 term_sq_flush_code;
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+index a67f58359de9e..cc07c91f9c549 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+@@ -382,7 +382,7 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
+ struct rtrs_clt_path *clt_path;
+ int err;
+
+- if (WARN_ON(!req->in_use))
++ if (!req->in_use)
+ return;
+ if (WARN_ON(!req->con))
+ return;
+@@ -1694,7 +1694,7 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
+ clt_path->s.dev_ref++;
+ max_send_wr = min_t(int, wr_limit,
+ /* QD * (REQ + RSP + FR REGS or INVS) + drain */
+- clt_path->queue_depth * 3 + 1);
++ clt_path->queue_depth * 4 + 1);
+ max_recv_wr = min_t(int, wr_limit,
+ clt_path->queue_depth * 3 + 1);
+ max_send_sge = 2;
+@@ -2346,8 +2346,6 @@ static int init_conns(struct rtrs_clt_path *clt_path)
+ if (err)
+ goto destroy;
+
+- rtrs_start_hb(&clt_path->s);
+-
+ return 0;
+
+ destroy:
+@@ -2621,6 +2619,7 @@ static int init_path(struct rtrs_clt_path *clt_path)
+ goto out;
+ }
+ rtrs_clt_path_up(clt_path);
++ rtrs_start_hb(&clt_path->s);
+ out:
+ mutex_unlock(&clt_path->init_mutex);
+
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+index 22d7ba05e9fe8..e978ee4bb73ae 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+@@ -63,8 +63,9 @@ static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path,
+ {
+ enum rtrs_srv_state old_state;
+ bool changed = false;
++ unsigned long flags;
+
+- spin_lock_irq(&srv_path->state_lock);
++ spin_lock_irqsave(&srv_path->state_lock, flags);
+ old_state = srv_path->state;
+ switch (new_state) {
+ case RTRS_SRV_CONNECTED:
+@@ -85,7 +86,7 @@ static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path,
+ }
+ if (changed)
+ srv_path->state = new_state;
+- spin_unlock_irq(&srv_path->state_lock);
++ spin_unlock_irqrestore(&srv_path->state_lock, flags);
+
+ return changed;
+ }
+@@ -548,7 +549,10 @@ static void unmap_cont_bufs(struct rtrs_srv_path *srv_path)
+ struct rtrs_srv_mr *srv_mr;
+
+ srv_mr = &srv_path->mrs[i];
+- rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1);
++
++ if (always_invalidate)
++ rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1);
++
+ ib_dereg_mr(srv_mr->mr);
+ ib_dma_unmap_sg(srv_path->s.dev->ib_dev, srv_mr->sgt.sgl,
+ srv_mr->sgt.nents, DMA_BIDIRECTIONAL);
+@@ -714,20 +718,23 @@ static void rtrs_srv_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
+ WARN_ON(wc->opcode != IB_WC_SEND);
+ }
+
+-static void rtrs_srv_path_up(struct rtrs_srv_path *srv_path)
++static int rtrs_srv_path_up(struct rtrs_srv_path *srv_path)
+ {
+ struct rtrs_srv_sess *srv = srv_path->srv;
+ struct rtrs_srv_ctx *ctx = srv->ctx;
+- int up;
++ int up, ret = 0;
+
+ mutex_lock(&srv->paths_ev_mutex);
+ up = ++srv->paths_up;
+ if (up == 1)
+- ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_CONNECTED, NULL);
++ ret = ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_CONNECTED, NULL);
+ mutex_unlock(&srv->paths_ev_mutex);
+
+ /* Mark session as established */
+- srv_path->established = true;
++ if (!ret)
++ srv_path->established = true;
++
++ return ret;
+ }
+
+ static void rtrs_srv_path_down(struct rtrs_srv_path *srv_path)
+@@ -856,7 +863,12 @@ static int process_info_req(struct rtrs_srv_con *con,
+ goto iu_free;
+ kobject_get(&srv_path->kobj);
+ get_device(&srv_path->srv->dev);
+- rtrs_srv_change_state(srv_path, RTRS_SRV_CONNECTED);
++ err = rtrs_srv_change_state(srv_path, RTRS_SRV_CONNECTED);
++ if (!err) {
++ rtrs_err(s, "rtrs_srv_change_state(), err: %d\n", err);
++ goto iu_free;
++ }
++
+ rtrs_srv_start_hb(srv_path);
+
+ /*
+@@ -865,7 +877,11 @@ static int process_info_req(struct rtrs_srv_con *con,
+ * all connections are successfully established. Thus, simply notify
+ * listener with a proper event if we are the first path.
+ */
+- rtrs_srv_path_up(srv_path);
++ err = rtrs_srv_path_up(srv_path);
++ if (err) {
++ rtrs_err(s, "rtrs_srv_path_up(), err: %d\n", err);
++ goto iu_free;
++ }
+
+ ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
+ tx_iu->dma_addr,
+@@ -1521,7 +1537,6 @@ static void rtrs_srv_close_work(struct work_struct *work)
+
+ srv_path = container_of(work, typeof(*srv_path), close_work);
+
+- rtrs_srv_destroy_path_files(srv_path);
+ rtrs_srv_stop_hb(srv_path);
+
+ for (i = 0; i < srv_path->s.con_num; i++) {
+@@ -1541,6 +1556,8 @@ static void rtrs_srv_close_work(struct work_struct *work)
+ /* Wait for all completion */
+ wait_for_completion(&srv_path->complete_done);
+
++ rtrs_srv_destroy_path_files(srv_path);
++
+ /* Notify upper layer if we are the last path */
+ rtrs_srv_path_down(srv_path);
+
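rtrs_srv_change_state() switches from spin_lock_irq() to spin_lock_irqsave() because spin_unlock_irq() unconditionally re-enables interrupts, which is wrong if the helper is ever reached with IRQs already disabled; the irqsave variant restores whatever state the caller had. A sketch of the safe form:

#include <linux/spinlock.h>

static void update_state(spinlock_t *lock, int *state, int new_state)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);		/* remembers IRQ state */
	*state = new_state;
	spin_unlock_irqrestore(lock, flags);	/* restores, not enables */
}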
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index 2bcd1f23d07d2..8b38972394776 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -278,12 +278,13 @@ static void dev_iommu_free(struct device *dev)
+ kfree(param);
+ }
+
++DEFINE_MUTEX(iommu_probe_device_lock);
++
+ static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
+ {
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+ struct iommu_device *iommu_dev;
+ struct iommu_group *group;
+- static DEFINE_MUTEX(iommu_probe_device_lock);
+ int ret;
+
+ if (!ops)
+@@ -295,11 +296,9 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
+ * probably be able to use device_lock() here to minimise the scope,
+ * but for now enforcing a simple global ordering is fine.
+ */
+- mutex_lock(&iommu_probe_device_lock);
+- if (!dev_iommu_get(dev)) {
+- ret = -ENOMEM;
+- goto err_unlock;
+- }
++ lockdep_assert_held(&iommu_probe_device_lock);
++ if (!dev_iommu_get(dev))
++ return -ENOMEM;
+
+ if (!try_module_get(ops->owner)) {
+ ret = -EINVAL;
+@@ -326,7 +325,6 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
+ mutex_unlock(&group->mutex);
+ iommu_group_put(group);
+
+- mutex_unlock(&iommu_probe_device_lock);
+ iommu_device_link(iommu_dev, dev);
+
+ return 0;
+@@ -341,9 +339,6 @@ out_module_put:
+ err_free:
+ dev_iommu_free(dev);
+
+-err_unlock:
+- mutex_unlock(&iommu_probe_device_lock);
+-
+ return ret;
+ }
+
+@@ -353,7 +348,9 @@ int iommu_probe_device(struct device *dev)
+ struct iommu_group *group;
+ int ret;
+
++ mutex_lock(&iommu_probe_device_lock);
+ ret = __iommu_probe_device(dev, NULL);
++ mutex_unlock(&iommu_probe_device_lock);
+ if (ret)
+ goto err_out;
+
+@@ -1684,7 +1681,9 @@ static int probe_iommu_group(struct device *dev, void *data)
+ return 0;
+ }
+
++ mutex_lock(&iommu_probe_device_lock);
+ ret = __iommu_probe_device(dev, group_list);
++ mutex_unlock(&iommu_probe_device_lock);
+ if (ret == -ENODEV)
+ ret = 0;
+
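The iommu.c change hoists iommu_probe_device_lock out of __iommu_probe_device() into its callers, so the OF and ACPI paths below can hold the same lock across fwspec construction; the lockdep_assert_held() keeps the new contract honest. The caller/callee split, as a sketch:

#include <linux/lockdep.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(probe_lock);

/* Callee documents and enforces the locking contract. */
static int do_probe_locked(struct device *dev)
{
	lockdep_assert_held(&probe_lock);	/* splat if caller forgot */
	/* ... touch dev->iommu safely ... */
	return 0;
}

static int probe_one(struct device *dev)
{
	int ret;

	mutex_lock(&probe_lock);
	ret = do_probe_locked(dev);
	mutex_unlock(&probe_lock);
	return ret;
}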
+diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
+index 5696314ae69e7..1fa1db3be8529 100644
+--- a/drivers/iommu/of_iommu.c
++++ b/drivers/iommu/of_iommu.c
+@@ -112,16 +112,20 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
+ const u32 *id)
+ {
+ const struct iommu_ops *ops = NULL;
+- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
++ struct iommu_fwspec *fwspec;
+ int err = NO_IOMMU;
+
+ if (!master_np)
+ return NULL;
+
++ /* Serialise to make dev->iommu stable under our potential fwspec */
++ mutex_lock(&iommu_probe_device_lock);
++ fwspec = dev_iommu_fwspec_get(dev);
+ if (fwspec) {
+- if (fwspec->ops)
++ if (fwspec->ops) {
++ mutex_unlock(&iommu_probe_device_lock);
+ return fwspec->ops;
+-
++ }
+ /* In the deferred case, start again from scratch */
+ iommu_fwspec_free(dev);
+ }
+@@ -155,6 +159,8 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
+ fwspec = dev_iommu_fwspec_get(dev);
+ ops = fwspec->ops;
+ }
++ mutex_unlock(&iommu_probe_device_lock);
++
+ /*
+ * If we have reason to believe the IOMMU driver missed the initial
+ * probe for dev, replay it to get things in order.
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 20f67edae95d0..0c2801d770901 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -93,6 +93,18 @@ static int remove_and_add_spares(struct mddev *mddev,
+ struct md_rdev *this);
+ static void mddev_detach(struct mddev *mddev);
+
++enum md_ro_state {
++ MD_RDWR,
++ MD_RDONLY,
++ MD_AUTO_READ,
++ MD_MAX_STATE
++};
++
++static bool md_is_rdwr(struct mddev *mddev)
++{
++ return (mddev->ro == MD_RDWR);
++}
++
+ /*
+ * Default number of read corrections we'll attempt on an rdev
+ * before ejecting it from the array. We divide the read error
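A note on the md_ro_state conversion introduced above: the enumerators keep the historical numeric values (MD_RDWR = 0, MD_RDONLY = 1, MD_AUTO_READ = 2), so anything that still reads mddev->ro numerically is unaffected; only the comparison sites gain names. Illustrative translation of the old idioms:

enum md_ro_state { MD_RDWR, MD_RDONLY, MD_AUTO_READ, MD_MAX_STATE };

/* MD_RDWR == 0, so the old tests map directly:
 *   if (mddev->ro)        ->  if (!md_is_rdwr(mddev))
 *   if (mddev->ro == 2)   ->  if (mddev->ro == MD_AUTO_READ)
 */
static bool md_is_rdwr_sketch(int ro)
{
	return ro == MD_RDWR;
}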
+@@ -444,7 +456,7 @@ static void md_submit_bio(struct bio *bio)
+ if (!bio)
+ return;
+
+- if (mddev->ro == 1 && unlikely(rw == WRITE)) {
++ if (mddev->ro == MD_RDONLY && unlikely(rw == WRITE)) {
+ if (bio_sectors(bio) != 0)
+ bio->bi_status = BLK_STS_IOERR;
+ bio_endio(bio);
+@@ -2643,7 +2655,7 @@ void md_update_sb(struct mddev *mddev, int force_change)
+ int any_badblocks_changed = 0;
+ int ret = -1;
+
+- if (mddev->ro) {
++ if (!md_is_rdwr(mddev)) {
+ if (force_change)
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+ return;
+@@ -3909,7 +3921,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
+ goto out_unlock;
+ }
+ rv = -EROFS;
+- if (mddev->ro)
++ if (!md_is_rdwr(mddev))
+ goto out_unlock;
+
+ /* request to change the personality. Need to ensure:
+@@ -4115,7 +4127,7 @@ layout_store(struct mddev *mddev, const char *buf, size_t len)
+ if (mddev->pers) {
+ if (mddev->pers->check_reshape == NULL)
+ err = -EBUSY;
+- else if (mddev->ro)
++ else if (!md_is_rdwr(mddev))
+ err = -EROFS;
+ else {
+ mddev->new_layout = n;
+@@ -4224,7 +4236,7 @@ chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
+ if (mddev->pers) {
+ if (mddev->pers->check_reshape == NULL)
+ err = -EBUSY;
+- else if (mddev->ro)
++ else if (!md_is_rdwr(mddev))
+ err = -EROFS;
+ else {
+ mddev->new_chunk_sectors = n >> 9;
+@@ -4347,13 +4359,13 @@ array_state_show(struct mddev *mddev, char *page)
+
+ if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) {
+ switch(mddev->ro) {
+- case 1:
++ case MD_RDONLY:
+ st = readonly;
+ break;
+- case 2:
++ case MD_AUTO_READ:
+ st = read_auto;
+ break;
+- case 0:
++ case MD_RDWR:
+ spin_lock(&mddev->lock);
+ if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
+ st = write_pending;
+@@ -4389,7 +4401,8 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
+ int err = 0;
+ enum array_state st = match_word(buf, array_states);
+
+- if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) {
++ if (mddev->pers && (st == active || st == clean) &&
++ mddev->ro != MD_RDONLY) {
+ /* don't take reconfig_mutex when toggling between
+ * clean and active
+ */
+@@ -4433,23 +4446,23 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
+ if (mddev->pers)
+ err = md_set_readonly(mddev, NULL);
+ else {
+- mddev->ro = 1;
++ mddev->ro = MD_RDONLY;
+ set_disk_ro(mddev->gendisk, 1);
+ err = do_md_run(mddev);
+ }
+ break;
+ case read_auto:
+ if (mddev->pers) {
+- if (mddev->ro == 0)
++ if (md_is_rdwr(mddev))
+ err = md_set_readonly(mddev, NULL);
+- else if (mddev->ro == 1)
++ else if (mddev->ro == MD_RDONLY)
+ err = restart_array(mddev);
+ if (err == 0) {
+- mddev->ro = 2;
++ mddev->ro = MD_AUTO_READ;
+ set_disk_ro(mddev->gendisk, 0);
+ }
+ } else {
+- mddev->ro = 2;
++ mddev->ro = MD_AUTO_READ;
+ err = do_md_run(mddev);
+ }
+ break;
+@@ -4474,7 +4487,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
+ wake_up(&mddev->sb_wait);
+ err = 0;
+ } else {
+- mddev->ro = 0;
++ mddev->ro = MD_RDWR;
+ set_disk_ro(mddev->gendisk, 0);
+ err = do_md_run(mddev);
+ }
+@@ -4775,7 +4788,7 @@ action_show(struct mddev *mddev, char *page)
+ if (test_bit(MD_RECOVERY_FROZEN, &recovery))
+ type = "frozen";
+ else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
+- (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
++ (md_is_rdwr(mddev) && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
+ if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
+ type = "reshape";
+ else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
+@@ -4861,11 +4874,11 @@ action_store(struct mddev *mddev, const char *page, size_t len)
+ set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
+ set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ }
+- if (mddev->ro == 2) {
++ if (mddev->ro == MD_AUTO_READ) {
+ /* A write to sync_action is enough to justify
+ * canceling read-auto mode
+ */
+- mddev->ro = 0;
++ mddev->ro = MD_RDWR;
+ md_wakeup_thread(mddev->sync_thread);
+ }
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+@@ -5093,8 +5106,7 @@ max_sync_store(struct mddev *mddev, const char *buf, size_t len)
+ goto out_unlock;
+
+ err = -EBUSY;
+- if (max < mddev->resync_max &&
+- mddev->ro == 0 &&
++ if (max < mddev->resync_max && md_is_rdwr(mddev) &&
+ test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+ goto out_unlock;
+
+@@ -5829,8 +5841,8 @@ int md_run(struct mddev *mddev)
+ continue;
+ sync_blockdev(rdev->bdev);
+ invalidate_bdev(rdev->bdev);
+- if (mddev->ro != 1 && rdev_read_only(rdev)) {
+- mddev->ro = 1;
++ if (mddev->ro != MD_RDONLY && rdev_read_only(rdev)) {
++ mddev->ro = MD_RDONLY;
+ if (mddev->gendisk)
+ set_disk_ro(mddev->gendisk, 1);
+ }
+@@ -5938,8 +5950,8 @@ int md_run(struct mddev *mddev)
+
+ mddev->ok_start_degraded = start_dirty_degraded;
+
+- if (start_readonly && mddev->ro == 0)
+- mddev->ro = 2; /* read-only, but switch on first write */
++ if (start_readonly && md_is_rdwr(mddev))
++ mddev->ro = MD_AUTO_READ; /* read-only, but switch on first write */
+
+ err = pers->run(mddev);
+ if (err)
+@@ -6017,8 +6029,8 @@ int md_run(struct mddev *mddev)
+ mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
+ mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
+ mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
+- } else if (mddev->ro == 2) /* auto-readonly not meaningful */
+- mddev->ro = 0;
++ } else if (mddev->ro == MD_AUTO_READ)
++ mddev->ro = MD_RDWR;
+
+ atomic_set(&mddev->max_corr_read_errors,
+ MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
+@@ -6036,7 +6048,7 @@ int md_run(struct mddev *mddev)
+ if (rdev->raid_disk >= 0)
+ sysfs_link_rdev(mddev, rdev); /* failure here is OK */
+
+- if (mddev->degraded && !mddev->ro)
++ if (mddev->degraded && md_is_rdwr(mddev))
+ /* This ensures that recovering status is reported immediately
+ * via sysfs - until a lack of spares is confirmed.
+ */
+@@ -6128,7 +6140,7 @@ static int restart_array(struct mddev *mddev)
+ return -ENXIO;
+ if (!mddev->pers)
+ return -EINVAL;
+- if (!mddev->ro)
++ if (md_is_rdwr(mddev))
+ return -EBUSY;
+
+ rcu_read_lock();
+@@ -6147,7 +6159,7 @@ static int restart_array(struct mddev *mddev)
+ return -EROFS;
+
+ mddev->safemode = 0;
+- mddev->ro = 0;
++ mddev->ro = MD_RDWR;
+ set_disk_ro(disk, 0);
+ pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
+ /* Kick recovery or resync if necessary */
+@@ -6174,7 +6186,7 @@ static void md_clean(struct mddev *mddev)
+ mddev->clevel[0] = 0;
+ mddev->flags = 0;
+ mddev->sb_flags = 0;
+- mddev->ro = 0;
++ mddev->ro = MD_RDWR;
+ mddev->metadata_type[0] = 0;
+ mddev->chunk_sectors = 0;
+ mddev->ctime = mddev->utime = 0;
+@@ -6226,7 +6238,7 @@ static void __md_stop_writes(struct mddev *mddev)
+ }
+ md_bitmap_flush(mddev);
+
+- if (mddev->ro == 0 &&
++ if (md_is_rdwr(mddev) &&
+ ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
+ mddev->sb_flags)) {
+ /* mark array as shutdown cleanly */
+@@ -6302,6 +6314,9 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
+ int err = 0;
+ int did_freeze = 0;
+
++ if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
++ return -EBUSY;
++
+ if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
+ did_freeze = 1;
+ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+@@ -6314,8 +6329,6 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
+ * which will now never happen */
+ wake_up_process(mddev->sync_thread->tsk);
+
+- if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
+- return -EBUSY;
+ mddev_unlock(mddev);
+ wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
+ &mddev->recovery));
+@@ -6328,29 +6341,30 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
+ mddev->sync_thread ||
+ test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
+ pr_warn("md: %s still in use.\n",mdname(mddev));
+- if (did_freeze) {
+- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+- md_wakeup_thread(mddev->thread);
+- }
+ err = -EBUSY;
+ goto out;
+ }
++
+ if (mddev->pers) {
+ __md_stop_writes(mddev);
+
+- err = -ENXIO;
+- if (mddev->ro==1)
++ if (mddev->ro == MD_RDONLY) {
++ err = -ENXIO;
+ goto out;
+- mddev->ro = 1;
++ }
++
++ mddev->ro = MD_RDONLY;
+ set_disk_ro(mddev->gendisk, 1);
++ }
++
++out:
++ if ((mddev->pers && !err) || did_freeze) {
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ sysfs_notify_dirent_safe(mddev->sysfs_state);
+- err = 0;
+ }
+-out:
++
+ mutex_unlock(&mddev->open_mutex);
+ return err;
+ }
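The md_set_readonly() rework above moves the MD_SB_CHANGE_PENDING check before the freeze and folds the did_freeze rollback and the success notification into one block behind the out: label. A stripped-down sketch of that control flow, with hypothetical types and -16 standing in for -EBUSY:

struct dev { int pending, frozen, busy, ro; };

static int set_readonly(struct dev *d)
{
        int did_freeze = 0, err = 0;

        if (d->pending)
                return -16;             /* checked before any side effect */

        if (!d->frozen) {
                d->frozen = 1;
                did_freeze = 1;
        }
        if (d->busy) {
                err = -16;
                goto out;
        }
        d->ro = 1;
out:
        /* one place notifies on success or rolls the freeze back */
        if (!err || did_freeze)
                d->frozen = 0;
        return err;
}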
+@@ -6399,7 +6413,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
+ return -EBUSY;
+ }
+ if (mddev->pers) {
+- if (mddev->ro)
++ if (!md_is_rdwr(mddev))
+ set_disk_ro(disk, 0);
+
+ __md_stop_writes(mddev);
+@@ -6416,8 +6430,8 @@ static int do_md_stop(struct mddev *mddev, int mode,
+ mutex_unlock(&mddev->open_mutex);
+ mddev->changed = 1;
+
+- if (mddev->ro)
+- mddev->ro = 0;
++ if (!md_is_rdwr(mddev))
++ mddev->ro = MD_RDWR;
+ } else
+ mutex_unlock(&mddev->open_mutex);
+ /*
+@@ -7232,7 +7246,7 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+ mddev->sync_thread)
+ return -EBUSY;
+- if (mddev->ro)
++ if (!md_is_rdwr(mddev))
+ return -EROFS;
+
+ rdev_for_each(rdev, mddev) {
+@@ -7262,7 +7276,7 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks)
+ /* change the number of raid disks */
+ if (mddev->pers->check_reshape == NULL)
+ return -EINVAL;
+- if (mddev->ro)
++ if (!md_is_rdwr(mddev))
+ return -EROFS;
+ if (raid_disks <= 0 ||
+ (mddev->max_disks && raid_disks >= mddev->max_disks))
+@@ -7686,26 +7700,25 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
+ * The remaining ioctls are changing the state of the
+ * superblock, so we do not allow them on read-only arrays.
+ */
+- if (mddev->ro && mddev->pers) {
+- if (mddev->ro == 2) {
+- mddev->ro = 0;
+- sysfs_notify_dirent_safe(mddev->sysfs_state);
+- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+- /* mddev_unlock will wake thread */
+- /* If a device failed while we were read-only, we
+- * need to make sure the metadata is updated now.
+- */
+- if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
+- mddev_unlock(mddev);
+- wait_event(mddev->sb_wait,
+- !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
+- !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
+- mddev_lock_nointr(mddev);
+- }
+- } else {
++ if (!md_is_rdwr(mddev) && mddev->pers) {
++ if (mddev->ro != MD_AUTO_READ) {
+ err = -EROFS;
+ goto unlock;
+ }
++ mddev->ro = MD_RDWR;
++ sysfs_notify_dirent_safe(mddev->sysfs_state);
++ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
++ /* mddev_unlock will wake thread */
++ /* If a device failed while we were read-only, we
++ * need to make sure the metadata is updated now.
++ */
++ if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
++ mddev_unlock(mddev);
++ wait_event(mddev->sb_wait,
++ !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
++ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
++ mddev_lock_nointr(mddev);
++ }
+ }
+
+ switch (cmd) {
+@@ -7791,11 +7804,11 @@ static int md_set_read_only(struct block_device *bdev, bool ro)
+ * Transitioning to read-auto need only happen for arrays that call
+ * md_write_start and which are not ready for writes yet.
+ */
+- if (!ro && mddev->ro == 1 && mddev->pers) {
++ if (!ro && mddev->ro == MD_RDONLY && mddev->pers) {
+ err = restart_array(mddev);
+ if (err)
+ goto out_unlock;
+- mddev->ro = 2;
++ mddev->ro = MD_AUTO_READ;
+ }
+
+ out_unlock:
+@@ -8269,9 +8282,9 @@ static int md_seq_show(struct seq_file *seq, void *v)
+ seq_printf(seq, "%s : %sactive", mdname(mddev),
+ mddev->pers ? "" : "in");
+ if (mddev->pers) {
+- if (mddev->ro==1)
++ if (mddev->ro == MD_RDONLY)
+ seq_printf(seq, " (read-only)");
+- if (mddev->ro==2)
++ if (mddev->ro == MD_AUTO_READ)
+ seq_printf(seq, " (auto-read-only)");
+ seq_printf(seq, " %s", mddev->pers->name);
+ }
+@@ -8530,10 +8543,10 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)
+ if (bio_data_dir(bi) != WRITE)
+ return true;
+
+- BUG_ON(mddev->ro == 1);
+- if (mddev->ro == 2) {
++ BUG_ON(mddev->ro == MD_RDONLY);
++ if (mddev->ro == MD_AUTO_READ) {
+ /* need to switch to read/write */
+- mddev->ro = 0;
++ mddev->ro = MD_RDWR;
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ md_wakeup_thread(mddev->sync_thread);
+@@ -8584,7 +8597,7 @@ void md_write_inc(struct mddev *mddev, struct bio *bi)
+ {
+ if (bio_data_dir(bi) != WRITE)
+ return;
+- WARN_ON_ONCE(mddev->in_sync || mddev->ro);
++ WARN_ON_ONCE(mddev->in_sync || !md_is_rdwr(mddev));
+ percpu_ref_get(&mddev->writes_pending);
+ }
+ EXPORT_SYMBOL(md_write_inc);
+@@ -8690,7 +8703,7 @@ void md_allow_write(struct mddev *mddev)
+ {
+ if (!mddev->pers)
+ return;
+- if (mddev->ro)
++ if (!md_is_rdwr(mddev))
+ return;
+ if (!mddev->pers->sync_request)
+ return;
+@@ -8738,7 +8751,7 @@ void md_do_sync(struct md_thread *thread)
+ if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
+ test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
+ return;
+- if (mddev->ro) {/* never try to sync a read-only array */
++ if (!md_is_rdwr(mddev)) {/* never try to sync a read-only array */
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ return;
+ }
+@@ -9207,9 +9220,9 @@ static int remove_and_add_spares(struct mddev *mddev,
+ if (test_bit(Faulty, &rdev->flags))
+ continue;
+ if (!test_bit(Journal, &rdev->flags)) {
+- if (mddev->ro &&
+- ! (rdev->saved_raid_disk >= 0 &&
+- !test_bit(Bitmap_sync, &rdev->flags)))
++ if (!md_is_rdwr(mddev) &&
++ !(rdev->saved_raid_disk >= 0 &&
++ !test_bit(Bitmap_sync, &rdev->flags)))
+ continue;
+
+ rdev->recovery_offset = 0;
+@@ -9307,7 +9320,8 @@ void md_check_recovery(struct mddev *mddev)
+ flush_signals(current);
+ }
+
+- if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
++ if (!md_is_rdwr(mddev) &&
++ !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
+ return;
+ if ( ! (
+ (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
+@@ -9326,7 +9340,7 @@ void md_check_recovery(struct mddev *mddev)
+ if (!mddev->external && mddev->safemode == 1)
+ mddev->safemode = 0;
+
+- if (mddev->ro) {
++ if (!md_is_rdwr(mddev)) {
+ struct md_rdev *rdev;
+ if (!mddev->external && mddev->in_sync)
+ /* 'Blocked' flag not needed as failed devices
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 98d4e93efa31c..e4564ca1f2434 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -5905,11 +5905,11 @@ static bool stripe_ahead_of_reshape(struct mddev *mddev, struct r5conf *conf,
+ int dd_idx;
+
+ for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) {
+- if (dd_idx == sh->pd_idx)
++ if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
+ continue;
+
+ min_sector = min(min_sector, sh->dev[dd_idx].sector);
+- max_sector = min(max_sector, sh->dev[dd_idx].sector);
++ max_sector = max(max_sector, sh->dev[dd_idx].sector);
+ }
+
+ spin_lock_irq(&conf->device_lock);
+diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
+index 0b2fbe1335a77..c70c89209fe55 100644
+--- a/drivers/misc/mei/client.c
++++ b/drivers/misc/mei/client.c
+@@ -1978,7 +1978,7 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
+
+ mei_hdr = mei_msg_hdr_init(cb);
+ if (IS_ERR(mei_hdr)) {
+- rets = -PTR_ERR(mei_hdr);
++ rets = PTR_ERR(mei_hdr);
+ mei_hdr = NULL;
+ goto err;
+ }
+@@ -2002,7 +2002,7 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
+
+ hbuf_slots = mei_hbuf_empty_slots(dev);
+ if (hbuf_slots < 0) {
+- rets = -EOVERFLOW;
++ buf_len = -EOVERFLOW;
+ goto out;
+ }
+
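In the mei hunk, PTR_ERR() already returns a negative errno, so negating it handed a positive value back to the caller; the overflow error is likewise rerouted through buf_len, which the caller inspects. A user-space model of the ERR_PTR/PTR_ERR encoding (simplified, not the kernel's implementation):

#include <stdio.h>

/* toy model of ERR_PTR()/PTR_ERR(); the kernel uses the same idea */
static void *err_ptr(long err) { return (void *)err; }
static long ptr_err(const void *p) { return (long)p; }
static int is_err(const void *p)
{
        return (unsigned long)p >= (unsigned long)-4095;
}

int main(void)
{
        void *hdr = err_ptr(-12);              /* -ENOMEM */

        if (is_err(hdr)) {
                long rets = ptr_err(hdr);      /* already negative */
                printf("rets = %ld\n", rets);  /* prints -12, not 12 */
        }
        return 0;
}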
+diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h
+index 19e996a829c9d..b54275389f8ac 100644
+--- a/drivers/net/arcnet/arcdevice.h
++++ b/drivers/net/arcnet/arcdevice.h
+@@ -186,6 +186,8 @@ do { \
+ #define ARC_IS_5MBIT 1 /* card default speed is 5MBit */
+ #define ARC_CAN_10MBIT 2 /* card uses COM20022, supporting 10MBit,
+ but default is 2.5MBit. */
++#define ARC_HAS_LED 4 /* card has software controlled LEDs */
++#define ARC_HAS_ROTARY 8 /* card has rotary encoder */
+
+ /* information needed to define an encapsulation driver */
+ struct ArcProto {
+diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
+index c580acb8b1d34..7b5c8bb02f119 100644
+--- a/drivers/net/arcnet/com20020-pci.c
++++ b/drivers/net/arcnet/com20020-pci.c
+@@ -213,12 +213,13 @@ static int com20020pci_probe(struct pci_dev *pdev,
+ if (!strncmp(ci->name, "EAE PLX-PCI FB2", 15))
+ lp->backplane = 1;
+
+- /* Get the dev_id from the PLX rotary coder */
+- if (!strncmp(ci->name, "EAE PLX-PCI MA1", 15))
+- dev_id_mask = 0x3;
+- dev->dev_id = (inb(priv->misc + ci->rotary) >> 4) & dev_id_mask;
+-
+- snprintf(dev->name, sizeof(dev->name), "arc%d-%d", dev->dev_id, i);
++ if (ci->flags & ARC_HAS_ROTARY) {
++ /* Get the dev_id from the PLX rotary coder */
++ if (!strncmp(ci->name, "EAE PLX-PCI MA1", 15))
++ dev_id_mask = 0x3;
++ dev->dev_id = (inb(priv->misc + ci->rotary) >> 4) & dev_id_mask;
++ snprintf(dev->name, sizeof(dev->name), "arc%d-%d", dev->dev_id, i);
++ }
+
+ if (arcnet_inb(ioaddr, COM20020_REG_R_STATUS) == 0xFF) {
+ pr_err("IO address %Xh is empty!\n", ioaddr);
+@@ -230,6 +231,10 @@ static int com20020pci_probe(struct pci_dev *pdev,
+ goto err_free_arcdev;
+ }
+
++ ret = com20020_found(dev, IRQF_SHARED);
++ if (ret)
++ goto err_free_arcdev;
++
+ card = devm_kzalloc(&pdev->dev, sizeof(struct com20020_dev),
+ GFP_KERNEL);
+ if (!card) {
+@@ -239,41 +244,39 @@ static int com20020pci_probe(struct pci_dev *pdev,
+
+ card->index = i;
+ card->pci_priv = priv;
+- card->tx_led.brightness_set = led_tx_set;
+- card->tx_led.default_trigger = devm_kasprintf(&pdev->dev,
+- GFP_KERNEL, "arc%d-%d-tx",
+- dev->dev_id, i);
+- card->tx_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+- "pci:green:tx:%d-%d",
+- dev->dev_id, i);
+-
+- card->tx_led.dev = &dev->dev;
+- card->recon_led.brightness_set = led_recon_set;
+- card->recon_led.default_trigger = devm_kasprintf(&pdev->dev,
+- GFP_KERNEL, "arc%d-%d-recon",
+- dev->dev_id, i);
+- card->recon_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+- "pci:red:recon:%d-%d",
+- dev->dev_id, i);
+- card->recon_led.dev = &dev->dev;
+- card->dev = dev;
+-
+- ret = devm_led_classdev_register(&pdev->dev, &card->tx_led);
+- if (ret)
+- goto err_free_arcdev;
+
+- ret = devm_led_classdev_register(&pdev->dev, &card->recon_led);
+- if (ret)
+- goto err_free_arcdev;
+-
+- dev_set_drvdata(&dev->dev, card);
+-
+- ret = com20020_found(dev, IRQF_SHARED);
+- if (ret)
+- goto err_free_arcdev;
+-
+- devm_arcnet_led_init(dev, dev->dev_id, i);
++ if (ci->flags & ARC_HAS_LED) {
++ card->tx_led.brightness_set = led_tx_set;
++ card->tx_led.default_trigger = devm_kasprintf(&pdev->dev,
++ GFP_KERNEL, "arc%d-%d-tx",
++ dev->dev_id, i);
++ card->tx_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
++ "pci:green:tx:%d-%d",
++ dev->dev_id, i);
++
++ card->tx_led.dev = &dev->dev;
++ card->recon_led.brightness_set = led_recon_set;
++ card->recon_led.default_trigger = devm_kasprintf(&pdev->dev,
++ GFP_KERNEL, "arc%d-%d-recon",
++ dev->dev_id, i);
++ card->recon_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
++ "pci:red:recon:%d-%d",
++ dev->dev_id, i);
++ card->recon_led.dev = &dev->dev;
++
++ ret = devm_led_classdev_register(&pdev->dev, &card->tx_led);
++ if (ret)
++ goto err_free_arcdev;
++
++ ret = devm_led_classdev_register(&pdev->dev, &card->recon_led);
++ if (ret)
++ goto err_free_arcdev;
++
++ dev_set_drvdata(&dev->dev, card);
++ devm_arcnet_led_init(dev, dev->dev_id, i);
++ }
+
++ card->dev = dev;
+ list_add(&card->list, &priv->list_dev);
+ continue;
+
+@@ -329,7 +332,7 @@ static struct com20020_pci_card_info card_info_5mbit = {
+ };
+
+ static struct com20020_pci_card_info card_info_sohard = {
+- .name = "PLX-PCI",
++ .name = "SOHARD SH ARC-PCI",
+ .devcount = 1,
+ /* SOHARD needs PCI base addr 4 */
+ .chan_map_tbl = {
+@@ -364,7 +367,7 @@ static struct com20020_pci_card_info card_info_eae_arc1 = {
+ },
+ },
+ .rotary = 0x0,
+- .flags = ARC_CAN_10MBIT,
++ .flags = ARC_HAS_ROTARY | ARC_HAS_LED | ARC_CAN_10MBIT,
+ };
+
+ static struct com20020_pci_card_info card_info_eae_ma1 = {
+@@ -396,7 +399,7 @@ static struct com20020_pci_card_info card_info_eae_ma1 = {
+ },
+ },
+ .rotary = 0x0,
+- .flags = ARC_CAN_10MBIT,
++ .flags = ARC_HAS_ROTARY | ARC_HAS_LED | ARC_CAN_10MBIT,
+ };
+
+ static struct com20020_pci_card_info card_info_eae_fb2 = {
+@@ -421,7 +424,7 @@ static struct com20020_pci_card_info card_info_eae_fb2 = {
+ },
+ },
+ .rotary = 0x0,
+- .flags = ARC_CAN_10MBIT,
++ .flags = ARC_HAS_ROTARY | ARC_HAS_LED | ARC_CAN_10MBIT,
+ };
+
+ static const struct pci_device_id com20020pci_id_table[] = {
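The com20020 rework keys the rotary-coder read and LED registration off per-card capability flags and calls com20020_found() before the optional LED setup, so flag-less cards (such as the SOHARD boards) still probe. A sketch of flag-gated optional features, with invented card data:

#include <stdio.h>

#define HAS_LED     (1u << 0)
#define HAS_ROTARY  (1u << 1)

struct card_info { const char *name; unsigned int flags; };

static void probe(const struct card_info *ci)
{
        /* mandatory bring-up happens unconditionally */
        printf("%s: core init\n", ci->name);
        if (ci->flags & HAS_ROTARY)
                printf("%s: read rotary id\n", ci->name);
        if (ci->flags & HAS_LED)
                printf("%s: register LEDs\n", ci->name);
}

int main(void)
{
        struct card_info eae = { "eae-ma1", HAS_LED | HAS_ROTARY };
        struct card_info sohard = { "sohard", 0 };

        probe(&eae);
        probe(&sohard);
        return 0;
}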
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
+index 80b44043e6c53..28c9b6f1a54f1 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
+@@ -553,17 +553,17 @@ void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp)
+
+ /* aq_ptp_rx_hwtstamp - utility function which checks for RX time stamp
+ * @adapter: pointer to adapter struct
+- * @skb: particular skb to send timestamp with
++ * @shhwtstamps: particular skb_shared_hwtstamps to save timestamp
+ *
+ * if the timestamp is valid, we convert it into the timecounter ns
+ * value, then store that result into the hwtstamps structure which
+ * is passed up the network stack
+ */
+-static void aq_ptp_rx_hwtstamp(struct aq_ptp_s *aq_ptp, struct sk_buff *skb,
++static void aq_ptp_rx_hwtstamp(struct aq_ptp_s *aq_ptp, struct skb_shared_hwtstamps *shhwtstamps,
+ u64 timestamp)
+ {
+ timestamp -= atomic_read(&aq_ptp->offset_ingress);
+- aq_ptp_convert_to_hwtstamp(aq_ptp, skb_hwtstamps(skb), timestamp);
++ aq_ptp_convert_to_hwtstamp(aq_ptp, shhwtstamps, timestamp);
+ }
+
+ void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
+@@ -639,7 +639,7 @@ bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring)
+ &aq_ptp->ptp_rx == ring || &aq_ptp->hwts_rx == ring;
+ }
+
+-u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p,
++u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct skb_shared_hwtstamps *shhwtstamps, u8 *p,
+ unsigned int len)
+ {
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+@@ -648,7 +648,7 @@ u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p,
+ p, len, &timestamp);
+
+ if (ret > 0)
+- aq_ptp_rx_hwtstamp(aq_ptp, skb, timestamp);
++ aq_ptp_rx_hwtstamp(aq_ptp, shhwtstamps, timestamp);
+
+ return ret;
+ }
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h
+index 28ccb7ca2df9e..210b723f22072 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h
+@@ -67,7 +67,7 @@ int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
+ /* Return either ring is belong to PTP or not*/
+ bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring);
+
+-u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p,
++u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct skb_shared_hwtstamps *shhwtstamps, u8 *p,
+ unsigned int len);
+
+ struct ptp_clock *aq_ptp_get_ptp_clock(struct aq_ptp_s *aq_ptp);
+@@ -143,7 +143,7 @@ static inline bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring)
+ }
+
+ static inline u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic,
+- struct sk_buff *skb, u8 *p,
++ struct skb_shared_hwtstamps *shhwtstamps, u8 *p,
+ unsigned int len)
+ {
+ return 0;
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+index 2dc8d215a5918..b5a49166fa972 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+@@ -647,7 +647,7 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
+ }
+ if (is_ptp_ring)
+ buff->len -=
+- aq_ptp_extract_ts(self->aq_nic, skb,
++ aq_ptp_extract_ts(self->aq_nic, skb_hwtstamps(skb),
+ aq_buf_vaddr(&buff->rxdata),
+ buff->len);
+
+@@ -742,6 +742,8 @@ static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring,
+ struct aq_ring_buff_s *buff = &rx_ring->buff_ring[rx_ring->sw_head];
+ bool is_ptp_ring = aq_ptp_ring(rx_ring->aq_nic, rx_ring);
+ struct aq_ring_buff_s *buff_ = NULL;
++ u16 ptp_hwtstamp_len = 0;
++ struct skb_shared_hwtstamps shhwtstamps;
+ struct sk_buff *skb = NULL;
+ unsigned int next_ = 0U;
+ struct xdp_buff xdp;
+@@ -810,11 +812,12 @@ static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring,
+ hard_start = page_address(buff->rxdata.page) +
+ buff->rxdata.pg_off - rx_ring->page_offset;
+
+- if (is_ptp_ring)
+- buff->len -=
+- aq_ptp_extract_ts(rx_ring->aq_nic, skb,
+- aq_buf_vaddr(&buff->rxdata),
+- buff->len);
++ if (is_ptp_ring) {
++ ptp_hwtstamp_len = aq_ptp_extract_ts(rx_ring->aq_nic, &shhwtstamps,
++ aq_buf_vaddr(&buff->rxdata),
++ buff->len);
++ buff->len -= ptp_hwtstamp_len;
++ }
+
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
+ xdp_prepare_buff(&xdp, hard_start, rx_ring->page_offset,
+@@ -834,6 +837,9 @@ static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring,
+ if (IS_ERR(skb) || !skb)
+ continue;
+
++ if (ptp_hwtstamp_len > 0)
++ *skb_hwtstamps(skb) = shhwtstamps;
++
+ if (buff->is_vlan)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ buff->vlan_rx_tag);
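Because the XDP path only builds the skb after the descriptor is parsed, the aq_ring fix extracts the hardware timestamp into a local skb_shared_hwtstamps first and copies it into the skb once one exists. The two-phase handoff, modeled with hypothetical types:

#include <stdint.h>
#include <string.h>

struct hwtstamps { uint64_t ns; };
struct sk_buff { struct hwtstamps ts; };     /* toy skb */

/* phase 1: parse into a stack copy -- no skb exists yet */
static unsigned int extract_ts(const uint8_t *p, struct hwtstamps *ts)
{
        memcpy(&ts->ns, p, sizeof(ts->ns));
        return sizeof(ts->ns);               /* bytes to trim from len */
}

/* phase 2: attach once the skb has actually been built */
static void attach_ts(struct sk_buff *skb, const struct hwtstamps *ts,
                      unsigned int ts_len)
{
        if (ts_len > 0)
                skb->ts = *ts;
}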
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+index d8afcf8d6b30e..4d6663ff84722 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+@@ -2075,6 +2075,7 @@ destroy_flow_table:
+ rhashtable_destroy(&tc_info->flow_table);
+ free_tc_info:
+ kfree(tc_info);
++ bp->tc_info = NULL;
+ return rc;
+ }
+
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 85570e40c8e9b..f60a16de565ed 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -6853,7 +6853,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
+ desc_idx, *post_ptr);
+ drop_it_no_recycle:
+ /* Other statistics kept track of by card. */
+- tp->rx_dropped++;
++ tnapi->rx_dropped++;
+ goto next_pkt;
+ }
+
+@@ -7879,8 +7879,10 @@ static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
+
+ segs = skb_gso_segment(skb, tp->dev->features &
+ ~(NETIF_F_TSO | NETIF_F_TSO6));
+- if (IS_ERR(segs) || !segs)
++ if (IS_ERR(segs) || !segs) {
++ tnapi->tx_dropped++;
+ goto tg3_tso_bug_end;
++ }
+
+ skb_list_walk_safe(segs, seg, next) {
+ skb_mark_not_on_list(seg);
+@@ -8151,7 +8153,7 @@ dma_error:
+ drop:
+ dev_kfree_skb_any(skb);
+ drop_nofree:
+- tp->tx_dropped++;
++ tnapi->tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+@@ -9330,7 +9332,7 @@ static void __tg3_set_rx_mode(struct net_device *);
+ /* tp->lock is held. */
+ static int tg3_halt(struct tg3 *tp, int kind, bool silent)
+ {
+- int err;
++ int err, i;
+
+ tg3_stop_fw(tp);
+
+@@ -9351,6 +9353,13 @@ static int tg3_halt(struct tg3 *tp, int kind, bool silent)
+
+ /* And make sure the next sample is new data */
+ memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
++
++ for (i = 0; i < TG3_IRQ_MAX_VECS; ++i) {
++ struct tg3_napi *tnapi = &tp->napi[i];
++
++ tnapi->rx_dropped = 0;
++ tnapi->tx_dropped = 0;
++ }
+ }
+
+ return err;
+@@ -11900,6 +11909,9 @@ static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
+ {
+ struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
+ struct tg3_hw_stats *hw_stats = tp->hw_stats;
++ unsigned long rx_dropped;
++ unsigned long tx_dropped;
++ int i;
+
+ stats->rx_packets = old_stats->rx_packets +
+ get_stat64(&hw_stats->rx_ucast_packets) +
+@@ -11946,8 +11958,26 @@ static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
+ stats->rx_missed_errors = old_stats->rx_missed_errors +
+ get_stat64(&hw_stats->rx_discards);
+
+- stats->rx_dropped = tp->rx_dropped;
+- stats->tx_dropped = tp->tx_dropped;
++ /* Aggregate per-queue counters. The per-queue counters are updated
++ * by a single writer, race-free. The result computed by this loop
++ * might not be 100% accurate (counters can be updated in the middle of
++ * the loop) but the next tg3_get_nstats() will recompute the current
++ * value so it is acceptable.
++ *
++ * Note that these counters wrap around at 4G on 32bit machines.
++ */
++ rx_dropped = (unsigned long)(old_stats->rx_dropped);
++ tx_dropped = (unsigned long)(old_stats->tx_dropped);
++
++ for (i = 0; i < tp->irq_cnt; i++) {
++ struct tg3_napi *tnapi = &tp->napi[i];
++
++ rx_dropped += tnapi->rx_dropped;
++ tx_dropped += tnapi->tx_dropped;
++ }
++
++ stats->rx_dropped = rx_dropped;
++ stats->tx_dropped = tx_dropped;
+ }
+
+ static int tg3_get_regs_len(struct net_device *dev)
+diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
+index 1000c894064f0..8d753f8c5b065 100644
+--- a/drivers/net/ethernet/broadcom/tg3.h
++++ b/drivers/net/ethernet/broadcom/tg3.h
+@@ -3018,6 +3018,7 @@ struct tg3_napi {
+ u16 *rx_rcb_prod_idx;
+ struct tg3_rx_prodring_set prodring;
+ struct tg3_rx_buffer_desc *rx_rcb;
++ unsigned long rx_dropped;
+
+ u32 tx_prod ____cacheline_aligned;
+ u32 tx_cons;
+@@ -3026,6 +3027,7 @@ struct tg3_napi {
+ u32 prodmbox;
+ struct tg3_tx_buffer_desc *tx_ring;
+ struct tg3_tx_ring_info *tx_buffers;
++ unsigned long tx_dropped;
+
+ dma_addr_t status_mapping;
+ dma_addr_t rx_rcb_mapping;
+@@ -3219,8 +3221,6 @@ struct tg3 {
+
+
+ /* begin "everything else" cacheline(s) section */
+- unsigned long rx_dropped;
+- unsigned long tx_dropped;
+ struct rtnl_link_stats64 net_stats_prev;
+ struct tg3_ethtool_stats estats_prev;
+
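The tg3 patch moves rx_dropped/tx_dropped into per-NAPI-queue fields: each counter then has exactly one writer and needs no locking, and the stats read sums them, tolerating a momentarily stale total (as the comment in the hunk notes). A minimal model:

#include <stdio.h>

#define NQ 4

struct q { unsigned long rx_dropped, tx_dropped; };

static struct q queues[NQ];     /* one writer per queue, race-free */

static void snapshot(unsigned long *rx, unsigned long *tx)
{
        *rx = *tx = 0;
        for (int i = 0; i < NQ; i++) {  /* lock-free, best-effort sum */
                *rx += queues[i].rx_dropped;
                *tx += queues[i].tx_dropped;
        }
}

int main(void)
{
        unsigned long rx, tx;

        queues[1].rx_dropped = 3;
        snapshot(&rx, &tx);
        printf("rx=%lu tx=%lu\n", rx, tx);
        return 0;
}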
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+index 928d934cb21a5..f75668c479351 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
++++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+@@ -66,6 +66,27 @@ static enum mac_mode hns_get_enet_interface(const struct hns_mac_cb *mac_cb)
+ }
+ }
+
++static u32 hns_mac_link_anti_shake(struct mac_driver *mac_ctrl_drv)
++{
++#define HNS_MAC_LINK_WAIT_TIME 5
++#define HNS_MAC_LINK_WAIT_CNT 40
++
++ u32 link_status = 0;
++ int i;
++
++ if (!mac_ctrl_drv->get_link_status)
++ return link_status;
++
++ for (i = 0; i < HNS_MAC_LINK_WAIT_CNT; i++) {
++ msleep(HNS_MAC_LINK_WAIT_TIME);
++ mac_ctrl_drv->get_link_status(mac_ctrl_drv, &link_status);
++ if (!link_status)
++ break;
++ }
++
++ return link_status;
++}
++
+ void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
+ {
+ struct mac_driver *mac_ctrl_drv;
+@@ -83,6 +104,14 @@ void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
+ &sfp_prsnt);
+ if (!ret)
+ *link_status = *link_status && sfp_prsnt;
++
++ /* For a FIBER port, it may report a fake link up.
++ * When the link status changes from down to up, we need to do
++ * anti-shake. The anti-shake time is based on tests.
++ * Only FIBER ports need to do this.
++ */
++ if (*link_status && !mac_cb->link)
++ *link_status = hns_mac_link_anti_shake(mac_ctrl_drv);
+ }
+
+ mac_cb->link = *link_status;
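The hns anti-shake loop re-reads the link status up to 40 times at 5 ms intervals and reports link-up only if it never dips; the constants come from testing, per the comment. A user-space model with a stubbed status reader:

#include <stdbool.h>
#include <unistd.h>

#define WAIT_MS  5
#define WAIT_CNT 40

static bool read_link_status(void)     /* stand-in for the MAC read */
{
        return true;
}

static bool link_anti_shake(void)
{
        bool up = false;

        for (int i = 0; i < WAIT_CNT; i++) {
                usleep(WAIT_MS * 1000);
                up = read_link_status();
                if (!up)               /* any dip means a fake link-up */
                        break;
        }
        return up;
}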
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+index 7cf10d1e2b311..85722afe21770 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+@@ -142,7 +142,8 @@ MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match);
+
+ static void fill_desc(struct hnae_ring *ring, void *priv,
+ int size, dma_addr_t dma, int frag_end,
+- int buf_num, enum hns_desc_type type, int mtu)
++ int buf_num, enum hns_desc_type type, int mtu,
++ bool is_gso)
+ {
+ struct hnae_desc *desc = &ring->desc[ring->next_to_use];
+ struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
+@@ -275,6 +276,15 @@ static int hns_nic_maybe_stop_tso(
+ return 0;
+ }
+
++static int hns_nic_maybe_stop_tx_v2(struct sk_buff **out_skb, int *bnum,
++ struct hnae_ring *ring)
++{
++ if (skb_is_gso(*out_skb))
++ return hns_nic_maybe_stop_tso(out_skb, bnum, ring);
++ else
++ return hns_nic_maybe_stop_tx(out_skb, bnum, ring);
++}
++
+ static void fill_tso_desc(struct hnae_ring *ring, void *priv,
+ int size, dma_addr_t dma, int frag_end,
+ int buf_num, enum hns_desc_type type, int mtu)
+@@ -300,6 +310,19 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv,
+ mtu);
+ }
+
++static void fill_desc_v2(struct hnae_ring *ring, void *priv,
++ int size, dma_addr_t dma, int frag_end,
++ int buf_num, enum hns_desc_type type, int mtu,
++ bool is_gso)
++{
++ if (is_gso)
++ fill_tso_desc(ring, priv, size, dma, frag_end, buf_num, type,
++ mtu);
++ else
++ fill_v2_desc(ring, priv, size, dma, frag_end, buf_num, type,
++ mtu);
++}
++
+ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
+ struct sk_buff *skb,
+ struct hns_nic_ring_data *ring_data)
+@@ -313,6 +336,7 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
+ int seg_num;
+ dma_addr_t dma;
+ int size, next_to_use;
++ bool is_gso;
+ int i;
+
+ switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
+@@ -339,8 +363,9 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
+ ring->stats.sw_err_cnt++;
+ goto out_err_tx_ok;
+ }
++ is_gso = skb_is_gso(skb);
+ priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
+- buf_num, DESC_TYPE_SKB, ndev->mtu);
++ buf_num, DESC_TYPE_SKB, ndev->mtu, is_gso);
+
+ /* fill the fragments */
+ for (i = 1; i < seg_num; i++) {
+@@ -354,7 +379,7 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
+ }
+ priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
+ seg_num - 1 == i ? 1 : 0, buf_num,
+- DESC_TYPE_PAGE, ndev->mtu);
++ DESC_TYPE_PAGE, ndev->mtu, is_gso);
+ }
+
+ /*complete translate all packets*/
+@@ -1776,15 +1801,6 @@ static int hns_nic_set_features(struct net_device *netdev,
+ netdev_info(netdev, "enet v1 do not support tso!\n");
+ break;
+ default:
+- if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
+- priv->ops.fill_desc = fill_tso_desc;
+- priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
+- /* The chip only support 7*4096 */
+- netif_set_tso_max_size(netdev, 7 * 4096);
+- } else {
+- priv->ops.fill_desc = fill_v2_desc;
+- priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
+- }
+ break;
+ }
+ netdev->features = features;
+@@ -2159,16 +2175,9 @@ static void hns_nic_set_priv_ops(struct net_device *netdev)
+ priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
+ } else {
+ priv->ops.get_rxd_bnum = get_v2rx_desc_bnum;
+- if ((netdev->features & NETIF_F_TSO) ||
+- (netdev->features & NETIF_F_TSO6)) {
+- priv->ops.fill_desc = fill_tso_desc;
+- priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
+- /* This chip only support 7*4096 */
+- netif_set_tso_max_size(netdev, 7 * 4096);
+- } else {
+- priv->ops.fill_desc = fill_v2_desc;
+- priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
+- }
++ priv->ops.fill_desc = fill_desc_v2;
++ priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx_v2;
++ netif_set_tso_max_size(netdev, 7 * 4096);
+ /* enable tso when init
+ * control tso on/off through TSE bit in bd
+ */
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
+index ffa9d6573f54b..3f3ee032f631c 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h
++++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
+@@ -44,7 +44,8 @@ struct hns_nic_ring_data {
+ struct hns_nic_ops {
+ void (*fill_desc)(struct hnae_ring *ring, void *priv,
+ int size, dma_addr_t dma, int frag_end,
+- int buf_num, enum hns_desc_type type, int mtu);
++ int buf_num, enum hns_desc_type type, int mtu,
++ bool is_gso);
+ int (*maybe_stop_tx)(struct sk_buff **out_skb,
+ int *bnum, struct hnae_ring *ring);
+ void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum);
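Rather than swapping the fill_desc/maybe_stop_tx function pointers whenever TSO features toggle (which could race with in-flight transmits), hns now installs stable *_v2 entry points that branch on skb_is_gso() per packet. The pattern, sketched with stand-in functions:

#include <stdbool.h>
#include <stdio.h>

static void fill_plain(int size) { printf("plain %d\n", size); }
static void fill_tso(int size)   { printf("tso %d\n", size); }

/* one stable entry point; the per-packet flag picks the path */
static void fill_desc_v2(int size, bool is_gso)
{
        if (is_gso)
                fill_tso(size);
        else
                fill_plain(size);
}

int main(void)
{
        fill_desc_v2(1500, false);
        fill_desc_v2(64000, true);
        return 0;
}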
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 9f5824eb8808a..b4157ff370a31 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -16158,7 +16158,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
+ if (val < MAX_FRAME_SIZE_DEFAULT)
+ dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
+- i, val);
++ pf->hw.port, val);
+
+ /* Add a filter to drop all Flow control frames from any VSI from being
+ * transmitted. By doing so we stop a malicious VF from sending out
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+index c13b4fa659ee9..31e02624aca48 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+@@ -829,18 +829,10 @@ static int __iavf_set_coalesce(struct net_device *netdev,
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+- if (ec->rx_coalesce_usecs == 0) {
+- if (ec->use_adaptive_rx_coalesce)
+- netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
+- } else if ((ec->rx_coalesce_usecs < IAVF_MIN_ITR) ||
+- (ec->rx_coalesce_usecs > IAVF_MAX_ITR)) {
++ if (ec->rx_coalesce_usecs > IAVF_MAX_ITR) {
+ netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
+ return -EINVAL;
+- } else if (ec->tx_coalesce_usecs == 0) {
+- if (ec->use_adaptive_tx_coalesce)
+- netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
+- } else if ((ec->tx_coalesce_usecs < IAVF_MIN_ITR) ||
+- (ec->tx_coalesce_usecs > IAVF_MAX_ITR)) {
++ } else if (ec->tx_coalesce_usecs > IAVF_MAX_ITR) {
+ netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
+ return -EINVAL;
+ }
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+index 7e6ee32d19b69..10ba36602c0c1 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h
++++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+@@ -15,7 +15,6 @@
+ */
+ #define IAVF_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
+ #define IAVF_ITR_MASK 0x1FFE /* mask for ITR register value */
+-#define IAVF_MIN_ITR 2 /* reg uses 2 usec resolution */
+ #define IAVF_ITR_100K 10 /* all values below must be even */
+ #define IAVF_ITR_50K 20
+ #define IAVF_ITR_20K 50
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+index a0c31f5b2ce05..03ebabd616353 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+@@ -1877,7 +1877,7 @@ struct mcs_hw_info {
+ u8 tcam_entries; /* RX/TX Tcam entries per mcs block */
+ u8 secy_entries; /* RX/TX SECY entries per mcs block */
+ u8 sc_entries; /* RX/TX SC CAM entries per mcs block */
+- u8 sa_entries; /* PN table entries = SA entries */
++ u16 sa_entries; /* PN table entries = SA entries */
+ u64 rsvd[16];
+ };
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
+index c43f19dfbd744..c1775bd01c2b4 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
+@@ -117,7 +117,7 @@ void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYTAGGEDCTLX(id);
+ stats->pkt_tagged_ctl_cnt = mcs_reg_read(mcs, reg);
+
+- reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(id);
++ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(id);
+ stats->pkt_untaged_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(id);
+@@ -215,7 +215,7 @@ void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats,
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCNOTVALIDX(id);
+ stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);
+
+- reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(id);
++ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDX(id);
+ stats->pkt_unchecked_cnt = mcs_reg_read(mcs, reg);
+
+ if (mcs->hw->mcs_blks > 1) {
+@@ -1219,6 +1219,17 @@ struct mcs *mcs_get_pdata(int mcs_id)
+ return NULL;
+ }
+
++bool is_mcs_bypass(int mcs_id)
++{
++ struct mcs *mcs_dev;
++
++ list_for_each_entry(mcs_dev, &mcs_list, mcs_list) {
++ if (mcs_dev->mcs_id == mcs_id)
++ return mcs_dev->bypass;
++ }
++ return true;
++}
++
+ void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req)
+ {
+ u64 val = 0;
+@@ -1436,7 +1447,7 @@ static int mcs_x2p_calibration(struct mcs *mcs)
+ return err;
+ }
+
+-static void mcs_set_external_bypass(struct mcs *mcs, u8 bypass)
++static void mcs_set_external_bypass(struct mcs *mcs, bool bypass)
+ {
+ u64 val;
+
+@@ -1447,6 +1458,7 @@ static void mcs_set_external_bypass(struct mcs *mcs, u8 bypass)
+ else
+ val &= ~BIT_ULL(6);
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
++ mcs->bypass = bypass;
+ }
+
+ static void mcs_global_cfg(struct mcs *mcs)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
+index 0f89dcb764654..f927cc61dfd21 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
+@@ -149,6 +149,7 @@ struct mcs {
+ u16 num_vec;
+ void *rvu;
+ u16 *tx_sa_active;
++ bool bypass;
+ };
+
+ struct mcs_ops {
+@@ -206,6 +207,7 @@ void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *
+ int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc);
+ int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req);
+ int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req);
++bool is_mcs_bypass(int mcs_id);
+
+ /* CN10K-B APIs */
+ void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
+index f3ab01fc363c8..f4c6de89002c1 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
+@@ -810,14 +810,37 @@
+ offset = 0x9d8ull; \
+ offset; })
+
++#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDX(a) ({ \
++ u64 offset; \
++ \
++ offset = 0xee80ull; \
++ if (mcs->hw->mcs_blks > 1) \
++ offset = 0xe818ull; \
++ offset += (a) * 0x8ull; \
++ offset; })
++
++#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(a) ({ \
++ u64 offset; \
++ \
++ offset = 0xa680ull; \
++ if (mcs->hw->mcs_blks > 1) \
++ offset = 0xd018ull; \
++ offset += (a) * 0x8ull; \
++ offset; })
++
++#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(a) ({ \
++ u64 offset; \
++ \
++ offset = 0xf680ull; \
++ if (mcs->hw->mcs_blks > 1) \
++ offset = 0xe018ull; \
++ offset += (a) * 0x8ull; \
++ offset; })
++
+ #define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCDECRYPTEDX(a) (0xe680ull + (a) * 0x8ull)
+ #define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCVALIDATEX(a) (0xde80ull + (a) * 0x8ull)
+-#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(a) (0xa680ull + (a) * 0x8ull)
+ #define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOTAGX(a) (0xd218 + (a) * 0x8ull)
+-#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(a) (0xd018ull + (a) * 0x8ull)
+-#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(a) (0xee80ull + (a) * 0x8ull)
+ #define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(a) (0xb680ull + (a) * 0x8ull)
+-#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(a) (0xf680ull + (a) * 0x8ull)
+ #define MCSX_CSE_RX_MEM_SLAVE_INPKTSSAINVALIDX(a) (0x12680ull + (a) * 0x8ull)
+ #define MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTUSINGSAERRORX(a) (0x15680ull + (a) * 0x8ull)
+ #define MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTVALIDX(a) (0x13680ull + (a) * 0x8ull)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+index 733add3a9dc6b..d88d86bf07b03 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+@@ -2622,6 +2622,9 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
+ */
+ rvu_npc_free_mcam_entries(rvu, pcifunc, -1);
+
++ if (rvu->mcs_blk_cnt)
++ rvu_mcs_flr_handler(rvu, pcifunc);
++
+ mutex_unlock(&rvu->flr_lock);
+ }
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+index a3346ea7876c5..95a7bc396e8ea 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+@@ -325,6 +325,7 @@ struct nix_hw {
+ struct nix_txvlan txvlan;
+ struct nix_ipolicer *ipolicer;
+ u64 *tx_credits;
++ u8 cc_mcs_cnt;
+ };
+
+ /* RVU block's capabilities or functionality,
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+index dc7bd2ce78f7d..d609512998992 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+@@ -1285,7 +1285,7 @@ static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
+
+ rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
+ if (!rvu_dl->devlink_wq)
+- goto err;
++ return -ENOMEM;
+
+ INIT_WORK(&rvu_reporters->intr_work, rvu_npa_intr_work);
+ INIT_WORK(&rvu_reporters->err_work, rvu_npa_err_work);
+@@ -1293,9 +1293,6 @@ static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
+ INIT_WORK(&rvu_reporters->ras_work, rvu_npa_ras_work);
+
+ return 0;
+-err:
+- rvu_npa_health_reporters_destroy(rvu_dl);
+- return -ENOMEM;
+ }
+
+ static int rvu_npa_health_reporters_create(struct rvu_devlink *rvu_dl)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index 7310047136986..959f36efdc4a6 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -12,6 +12,7 @@
+ #include "rvu_reg.h"
+ #include "rvu.h"
+ #include "npc.h"
++#include "mcs.h"
+ #include "cgx.h"
+ #include "lmac_common.h"
+ #include "rvu_npc_hash.h"
+@@ -4164,6 +4165,12 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
+ SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
+ }
+
++ /* Get MCS external bypass status for CN10K-B */
++ if (mcs_get_blkcnt() == 1) {
++ /* Adjust for 2 credits when external bypass is disabled */
++ nix_hw->cc_mcs_cnt = is_mcs_bypass(0) ? 0 : 2;
++ }
++
+ /* Set credits for Tx links assuming max packet length allowed.
+ * This will be reconfigured based on MTU set for PF/VF.
+ */
+@@ -4187,6 +4194,7 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
+ tx_credits = (lmac_fifo_len - lmac_max_frs) / 16;
+ /* Enable credits and set credit pkt count to max allowed */
+ cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
++ cfg |= FIELD_PREP(NIX_AF_LINKX_MCS_CNT_MASK, nix_hw->cc_mcs_cnt);
+
+ link = iter + slink;
+ nix_hw->tx_credits[link] = tx_credits;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+index 16cfc802e348d..f65805860c8d4 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+@@ -389,7 +389,13 @@ static u64 npc_get_default_entry_action(struct rvu *rvu, struct npc_mcam *mcam,
+ int bank, nixlf, index;
+
+ /* get the ucast rule entry index */
+- nix_get_nixlf(rvu, pf_func, &nixlf, NULL);
++ if (nix_get_nixlf(rvu, pf_func, &nixlf, NULL)) {
++ dev_err(rvu->dev, "%s: nixlf not attached to pcifunc:0x%x\n",
++ __func__, pf_func);
++ /* Action 0 is drop */
++ return 0;
++ }
++
+ index = npc_get_nixlf_mcam_index(mcam, pf_func, nixlf,
+ NIXLF_UCAST_ENTRY);
+ bank = npc_get_bank(mcam, index);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
+index b3150f0532919..d46ac29adb966 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
+@@ -31,8 +31,8 @@ static struct hw_reg_map txsch_reg_map[NIX_TXSCH_LVL_CNT] = {
+ {NIX_TXSCH_LVL_TL4, 3, 0xFFFF, {{0x0B00, 0x0B08}, {0x0B10, 0x0B18},
+ {0x1200, 0x12E0} } },
+ {NIX_TXSCH_LVL_TL3, 4, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608},
+- {0x1610, 0x1618}, {0x1700, 0x17B0} } },
+- {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x17B0} } },
++ {0x1610, 0x1618}, {0x1700, 0x17C8} } },
++ {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x17C8} } },
+ {NIX_TXSCH_LVL_TL1, 1, 0xFFFF, {{0x0C00, 0x0D98} } },
+ };
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+index 39f7a7cb27558..b690e5566f12a 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+@@ -434,6 +434,7 @@
+
+ #define NIX_AF_LINKX_BASE_MASK GENMASK_ULL(11, 0)
+ #define NIX_AF_LINKX_RANGE_MASK GENMASK_ULL(19, 16)
++#define NIX_AF_LINKX_MCS_CNT_MASK GENMASK_ULL(33, 32)
+
+ /* SSO */
+ #define SSO_AF_CONST (0x1000)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+index aaf1af2a402ec..af779ae40d3c2 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+@@ -323,9 +323,12 @@ static void otx2_get_pauseparam(struct net_device *netdev,
+ if (is_otx2_lbkvf(pfvf->pdev))
+ return;
+
++ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
+- if (!req)
++ if (!req) {
++ mutex_unlock(&pfvf->mbox.lock);
+ return;
++ }
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
+ rsp = (struct cgx_pause_frm_cfg *)
+@@ -333,6 +336,7 @@ static void otx2_get_pauseparam(struct net_device *netdev,
+ pause->rx_pause = rsp->rx_pause;
+ pause->tx_pause = rsp->tx_pause;
+ }
++ mutex_unlock(&pfvf->mbox.lock);
+ }
+
+ static int otx2_set_pauseparam(struct net_device *netdev,
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+index 18c5d2b3f7f95..55807e2043edf 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+@@ -1676,6 +1676,14 @@ static void otx2_do_set_rx_mode(struct otx2_nic *pf)
+ mutex_unlock(&pf->mbox.lock);
+ }
+
++static void otx2_set_irq_coalesce(struct otx2_nic *pfvf)
++{
++ int cint;
++
++ for (cint = 0; cint < pfvf->hw.cint_cnt; cint++)
++ otx2_config_irq_coalescing(pfvf, cint);
++}
++
+ static void otx2_dim_work(struct work_struct *w)
+ {
+ struct dim_cq_moder cur_moder;
+@@ -1691,6 +1699,7 @@ static void otx2_dim_work(struct work_struct *w)
+ CQ_TIMER_THRESH_MAX : cur_moder.usec;
+ pfvf->hw.cq_ecount_wait = (cur_moder.pkts > NAPI_POLL_WEIGHT) ?
+ NAPI_POLL_WEIGHT : cur_moder.pkts;
++ otx2_set_irq_coalesce(pfvf);
+ dim->state = DIM_START_MEASURE;
+ }
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+index 20d801d30c732..aee392a15b23c 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+@@ -510,11 +510,18 @@ static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_p
+ {
+ struct dim_sample dim_sample;
+ u64 rx_frames, rx_bytes;
++ u64 tx_frames, tx_bytes;
+
+ rx_frames = OTX2_GET_RX_STATS(RX_BCAST) + OTX2_GET_RX_STATS(RX_MCAST) +
+ OTX2_GET_RX_STATS(RX_UCAST);
+ rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
+- dim_update_sample(pfvf->napi_events, rx_frames, rx_bytes, &dim_sample);
++ tx_bytes = OTX2_GET_TX_STATS(TX_OCTS);
++ tx_frames = OTX2_GET_TX_STATS(TX_UCAST);
++
++ dim_update_sample(pfvf->napi_events,
++ rx_frames + tx_frames,
++ rx_bytes + tx_bytes,
++ &dim_sample);
+ net_dim(&cq_poll->dim, dim_sample);
+ }
+
+@@ -555,16 +562,9 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
+ if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
+ return workdone;
+
+- /* Check for adaptive interrupt coalesce */
+- if (workdone != 0 &&
+- ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) ==
+- OTX2_FLAG_ADPTV_INT_COAL_ENABLED)) {
+- /* Adjust irq coalese using net_dim */
++ /* Adjust irq coalescing using net_dim */
++ if (pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED)
+ otx2_adjust_adaptive_coalese(pfvf, cq_poll);
+- /* Update irq coalescing */
+- for (i = 0; i < pfvf->hw.cint_cnt; i++)
+- otx2_config_irq_coalescing(pfvf, i);
+- }
+
+ /* Re-enable interrupts */
+ otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
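The otx2 change feeds both RX and TX totals into the dim sample and applies the resulting moderation from the dim worker instead of re-programming every vector on each NAPI pass. A toy version of the combined sample, with invented counter values:

#include <stdio.h>

struct dim_sample { unsigned long events, pkts, bytes; };

static void dim_update(struct dim_sample *s, unsigned long pkts,
                       unsigned long bytes)
{
        s->events++;
        s->pkts = pkts;
        s->bytes = bytes;
}

int main(void)
{
        struct dim_sample s = { 0 };
        unsigned long rxf = 100, rxb = 64000, txf = 80, txb = 12000;

        /* the sample now reflects both directions of traffic */
        dim_update(&s, rxf + txf, rxb + txb);
        printf("pkts=%lu bytes=%lu\n", s.pkts, s.bytes);
        return 0;
}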
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+index 93a4258421667..13dfcf9f75dad 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
++++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+@@ -214,7 +214,7 @@ struct ionic_desc_info {
+ void *cb_arg;
+ };
+
+-#define IONIC_QUEUE_NAME_MAX_SZ 32
++#define IONIC_QUEUE_NAME_MAX_SZ 16
+
+ struct ionic_queue {
+ struct device *dev;
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+index a89ab455af67d..f7634884c7508 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+@@ -46,24 +46,24 @@ static void ionic_lif_queue_identify(struct ionic_lif *lif);
+ static void ionic_dim_work(struct work_struct *work)
+ {
+ struct dim *dim = container_of(work, struct dim, work);
++ struct ionic_intr_info *intr;
+ struct dim_cq_moder cur_moder;
+ struct ionic_qcq *qcq;
++ struct ionic_lif *lif;
+ u32 new_coal;
+
+ cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ qcq = container_of(dim, struct ionic_qcq, dim);
+- new_coal = ionic_coal_usec_to_hw(qcq->q.lif->ionic, cur_moder.usec);
++ lif = qcq->q.lif;
++ new_coal = ionic_coal_usec_to_hw(lif->ionic, cur_moder.usec);
+ new_coal = new_coal ? new_coal : 1;
+
+- if (qcq->intr.dim_coal_hw != new_coal) {
+- unsigned int qi = qcq->cq.bound_q->index;
+- struct ionic_lif *lif = qcq->q.lif;
+-
+- qcq->intr.dim_coal_hw = new_coal;
++ intr = &qcq->intr;
++ if (intr->dim_coal_hw != new_coal) {
++ intr->dim_coal_hw = new_coal;
+
+ ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
+- lif->rxqcqs[qi]->intr.index,
+- qcq->intr.dim_coal_hw);
++ intr->index, intr->dim_coal_hw);
+ }
+
+ dim->state = DIM_START_MEASURE;
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index abfa375b08878..d22457f2cf9cf 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -193,6 +193,7 @@ enum rtl_registers {
+ /* No threshold before first PCI xfer */
+ #define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT)
+ #define RX_EARLY_OFF (1 << 11)
++#define RX_PAUSE_SLOT_ON (1 << 11) /* 8125b and later */
+ #define RXCFG_DMA_SHIFT 8
+ /* Unlimited maximum PCI burst. */
+ #define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT)
+@@ -2237,9 +2238,13 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53:
+ RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
+ break;
+- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
++ case RTL_GIGA_MAC_VER_61:
+ RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST);
+ break;
++ case RTL_GIGA_MAC_VER_63:
++ RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST |
++ RX_PAUSE_SLOT_ON);
++ break;
+ default:
+ RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST);
+ break;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+index e95d35f1e5a0c..8fd167501fa0e 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+@@ -710,28 +710,22 @@ void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev,
+ }
+ }
+
+-void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq,
++void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
++ u32 num_txq, u32 num_rxq,
+ bool enable)
+ {
+ u32 value;
+
+- if (!enable) {
+- value = readl(ioaddr + MAC_FPE_CTRL_STS);
+-
+- value &= ~EFPE;
+-
+- writel(value, ioaddr + MAC_FPE_CTRL_STS);
+- return;
++ if (enable) {
++ cfg->fpe_csr = EFPE;
++ value = readl(ioaddr + GMAC_RXQ_CTRL1);
++ value &= ~GMAC_RXQCTRL_FPRQ;
++ value |= (num_rxq - 1) << GMAC_RXQCTRL_FPRQ_SHIFT;
++ writel(value, ioaddr + GMAC_RXQ_CTRL1);
++ } else {
++ cfg->fpe_csr = 0;
+ }
+-
+- value = readl(ioaddr + GMAC_RXQ_CTRL1);
+- value &= ~GMAC_RXQCTRL_FPRQ;
+- value |= (num_rxq - 1) << GMAC_RXQCTRL_FPRQ_SHIFT;
+- writel(value, ioaddr + GMAC_RXQ_CTRL1);
+-
+- value = readl(ioaddr + MAC_FPE_CTRL_STS);
+- value |= EFPE;
+- writel(value, ioaddr + MAC_FPE_CTRL_STS);
++ writel(cfg->fpe_csr, ioaddr + MAC_FPE_CTRL_STS);
+ }
+
+ int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
+@@ -741,6 +735,9 @@ int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
+
+ status = FPE_EVENT_UNKNOWN;
+
++ /* Reads from the MAC_FPE_CTRL_STS register should only be performed
++ * here, since the status flags of MAC_FPE_CTRL_STS are "clear on read"
++ */
+ value = readl(ioaddr + MAC_FPE_CTRL_STS);
+
+ if (value & TRSP) {
+@@ -766,19 +763,15 @@ int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
+ return status;
+ }
+
+-void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, enum stmmac_mpacket_type type)
++void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
++ enum stmmac_mpacket_type type)
+ {
+- u32 value;
++ u32 value = cfg->fpe_csr;
+
+- value = readl(ioaddr + MAC_FPE_CTRL_STS);
+-
+- if (type == MPACKET_VERIFY) {
+- value &= ~SRSP;
++ if (type == MPACKET_VERIFY)
+ value |= SVER;
+- } else {
+- value &= ~SVER;
++ else if (type == MPACKET_RESPONSE)
+ value |= SRSP;
+- }
+
+ writel(value, ioaddr + MAC_FPE_CTRL_STS);
+ }
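
[The rewritten dwmac5_fpe_configure()/dwmac5_fpe_send_mpacket() stop doing read-modify-write on MAC_FPE_CTRL_STS and instead keep the control bits in a software shadow (cfg->fpe_csr), because, as the new comment in dwmac5_fpe_irq_status() notes, the register's status flags are clear-on-read and any stray read would eat pending events. A standalone sketch of the shadow-register pattern, with the register replaced by a plain variable and illustrative bit positions:]

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions; the real register also carries
 * clear-on-read status flags, which is why a shadow is needed. */
#define EFPE (1u << 0)  /* enable FPE (persistent control bit) */
#define SVER (1u << 1)  /* send verify mPacket (self-clearing) */
#define SRSP (1u << 2)  /* send response mPacket (self-clearing) */

struct fpe_cfg {
	uint32_t fpe_csr;        /* software shadow of the control bits */
};

static uint32_t fake_ctrl_sts; /* stands in for MAC_FPE_CTRL_STS */

static void reg_write(uint32_t v)
{
	fake_ctrl_sts = v;
}

static void fpe_configure(struct fpe_cfg *cfg, int enable)
{
	cfg->fpe_csr = enable ? EFPE : 0;
	reg_write(cfg->fpe_csr);  /* compose from the shadow, never read back */
}

static void fpe_send_mpacket(struct fpe_cfg *cfg, int verify)
{
	uint32_t value = cfg->fpe_csr;  /* start from cached control bits */

	value |= verify ? SVER : SRSP;
	reg_write(value);
}

int main(void)
{
	struct fpe_cfg cfg = { 0 };

	fpe_configure(&cfg, 1);
	fpe_send_mpacket(&cfg, 1);
	printf("MAC_FPE_CTRL_STS model = 0x%x\n", (unsigned)fake_ctrl_sts);
	return 0;
}
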
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
+index 53c138d0ff480..34e620790eb37 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
+@@ -153,9 +153,11 @@ int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
+ unsigned int ptp_rate);
+ void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev,
+ struct stmmac_extra_stats *x, u32 txqcnt);
+-void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq,
++void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
++ u32 num_txq, u32 num_rxq,
+ bool enable);
+ void dwmac5_fpe_send_mpacket(void __iomem *ioaddr,
++ struct stmmac_fpe_cfg *cfg,
+ enum stmmac_mpacket_type type);
+ int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev);
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+index f30e08a106cbe..c2181c277291b 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+@@ -1441,7 +1441,8 @@ static int dwxgmac3_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
+ return 0;
+ }
+
+-static void dwxgmac3_fpe_configure(void __iomem *ioaddr, u32 num_txq,
++static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
++ u32 num_txq,
+ u32 num_rxq, bool enable)
+ {
+ u32 value;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
+index 592b4067f9b8f..b2b9cf04bc726 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
++++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
+@@ -392,9 +392,11 @@ struct stmmac_ops {
+ unsigned int ptp_rate);
+ void (*est_irq_status)(void __iomem *ioaddr, struct net_device *dev,
+ struct stmmac_extra_stats *x, u32 txqcnt);
+- void (*fpe_configure)(void __iomem *ioaddr, u32 num_txq, u32 num_rxq,
++ void (*fpe_configure)(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
++ u32 num_txq, u32 num_rxq,
+ bool enable);
+ void (*fpe_send_mpacket)(void __iomem *ioaddr,
++ struct stmmac_fpe_cfg *cfg,
+ enum stmmac_mpacket_type type);
+ int (*fpe_irq_status)(void __iomem *ioaddr, struct net_device *dev);
+ };
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 9f76c2f7d513b..69aac8ed84f67 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -957,7 +957,8 @@ static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
+ bool *hs_enable = &fpe_cfg->hs_enable;
+
+ if (is_up && *hs_enable) {
+- stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
++ stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
++ MPACKET_VERIFY);
+ } else {
+ *lo_state = FPE_STATE_OFF;
+ *lp_state = FPE_STATE_OFF;
+@@ -5704,6 +5705,7 @@ static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
+ /* If the user has requested FPE enable, respond quickly */
+ if (*hs_enable)
+ stmmac_fpe_send_mpacket(priv, priv->ioaddr,
++ fpe_cfg,
+ MPACKET_RESPONSE);
+ }
+
+@@ -7028,6 +7030,7 @@ static void stmmac_fpe_lp_task(struct work_struct *work)
+ if (*lo_state == FPE_STATE_ENTERING_ON &&
+ *lp_state == FPE_STATE_ENTERING_ON) {
+ stmmac_fpe_configure(priv, priv->ioaddr,
++ fpe_cfg,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use,
+ *enable);
+@@ -7046,6 +7049,7 @@ static void stmmac_fpe_lp_task(struct work_struct *work)
+ netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT,
+ *lo_state, *lp_state);
+ stmmac_fpe_send_mpacket(priv, priv->ioaddr,
++ fpe_cfg,
+ MPACKET_VERIFY);
+ }
+ /* Sleep then retry */
+@@ -7060,6 +7064,7 @@ void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
+ if (priv->plat->fpe_cfg->hs_enable != enable) {
+ if (enable) {
+ stmmac_fpe_send_mpacket(priv, priv->ioaddr,
++ priv->plat->fpe_cfg,
+ MPACKET_VERIFY);
+ } else {
+ priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
+@@ -7472,6 +7477,7 @@ int stmmac_suspend(struct device *dev)
+ if (priv->dma_cap.fpesel) {
+ /* Disable FPE */
+ stmmac_fpe_configure(priv, priv->ioaddr,
++ priv->plat->fpe_cfg,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use, false);
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+index 773e415cc2de6..390c900832cd2 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+@@ -1073,6 +1073,7 @@ disable:
+
+ priv->plat->fpe_cfg->enable = false;
+ stmmac_fpe_configure(priv, priv->ioaddr,
++ priv->plat->fpe_cfg,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use,
+ false);
+diff --git a/drivers/net/hyperv/Kconfig b/drivers/net/hyperv/Kconfig
+index ca7bf7f897d36..c8cbd85adcf99 100644
+--- a/drivers/net/hyperv/Kconfig
++++ b/drivers/net/hyperv/Kconfig
+@@ -3,5 +3,6 @@ config HYPERV_NET
+ tristate "Microsoft Hyper-V virtual network driver"
+ depends on HYPERV
+ select UCS2_STRING
++ select NLS
+ help
+ Select this option to enable the Hyper-V virtual network driver.
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index 345e341d22338..4d833781294a4 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -763,7 +763,7 @@ enum rtl_register_content {
+
+ /* rtl8152 flags */
+ enum rtl8152_flags {
+- RTL8152_UNPLUG = 0,
++ RTL8152_INACCESSIBLE = 0,
+ RTL8152_SET_RX_MODE,
+ WORK_ENABLE,
+ RTL8152_LINK_CHG,
+@@ -1244,7 +1244,7 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
+ static void rtl_set_unplug(struct r8152 *tp)
+ {
+ if (tp->udev->state == USB_STATE_NOTATTACHED) {
+- set_bit(RTL8152_UNPLUG, &tp->flags);
++ set_bit(RTL8152_INACCESSIBLE, &tp->flags);
+ smp_mb__after_atomic();
+ }
+ }
+@@ -1255,7 +1255,7 @@ static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
+ u16 limit = 64;
+ int ret = 0;
+
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return -ENODEV;
+
+ /* both size and index must be 4 bytes aligned */
+@@ -1299,7 +1299,7 @@ static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen,
+ u16 byteen_start, byteen_end, byen;
+ u16 limit = 512;
+
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return -ENODEV;
+
+ /* both size and index must be 4 bytes aligned */
+@@ -1529,7 +1529,7 @@ static int read_mii_word(struct net_device *netdev, int phy_id, int reg)
+ struct r8152 *tp = netdev_priv(netdev);
+ int ret;
+
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return -ENODEV;
+
+ if (phy_id != R8152_PHY_ID)
+@@ -1545,7 +1545,7 @@ void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val)
+ {
+ struct r8152 *tp = netdev_priv(netdev);
+
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return;
+
+ if (phy_id != R8152_PHY_ID)
+@@ -1750,7 +1750,7 @@ static void read_bulk_callback(struct urb *urb)
+ if (!tp)
+ return;
+
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return;
+
+ if (!test_bit(WORK_ENABLE, &tp->flags))
+@@ -1842,7 +1842,7 @@ static void write_bulk_callback(struct urb *urb)
+ if (!test_bit(WORK_ENABLE, &tp->flags))
+ return;
+
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return;
+
+ if (!skb_queue_empty(&tp->tx_queue))
+@@ -1863,7 +1863,7 @@ static void intr_callback(struct urb *urb)
+ if (!test_bit(WORK_ENABLE, &tp->flags))
+ return;
+
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return;
+
+ switch (status) {
+@@ -2607,7 +2607,7 @@ static void bottom_half(struct tasklet_struct *t)
+ {
+ struct r8152 *tp = from_tasklet(tp, t, tx_tl);
+
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return;
+
+ if (!test_bit(WORK_ENABLE, &tp->flags))
+@@ -2650,7 +2650,7 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
+ int ret;
+
+ /* The rx would be stopped, so skip submitting */
+- if (test_bit(RTL8152_UNPLUG, &tp->flags) ||
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags) ||
+ !test_bit(WORK_ENABLE, &tp->flags) || !netif_carrier_ok(tp->netdev))
+ return 0;
+
+@@ -2857,6 +2857,8 @@ static void rtl8152_nic_reset(struct r8152 *tp)
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, CR_RST);
+
+ for (i = 0; i < 1000; i++) {
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
++ break;
+ if (!(ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR) & CR_RST))
+ break;
+ usleep_range(100, 400);
+@@ -3050,7 +3052,7 @@ static int rtl_enable(struct r8152 *tp)
+
+ static int rtl8152_enable(struct r8152 *tp)
+ {
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return -ENODEV;
+
+ set_tx_qlen(tp);
+@@ -3137,7 +3139,7 @@ static int rtl8153_enable(struct r8152 *tp)
+ {
+ u32 ocp_data;
+
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return -ENODEV;
+
+ set_tx_qlen(tp);
+@@ -3169,7 +3171,7 @@ static void rtl_disable(struct r8152 *tp)
+ u32 ocp_data;
+ int i;
+
+- if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
+ rtl_drop_queued_tx(tp);
+ return;
+ }
+@@ -3186,6 +3188,8 @@ static void rtl_disable(struct r8152 *tp)
+ rxdy_gated_en(tp, true);
+
+ for (i = 0; i < 1000; i++) {
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
++ break;
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ if ((ocp_data & FIFO_EMPTY) == FIFO_EMPTY)
+ break;
+@@ -3193,6 +3197,8 @@ static void rtl_disable(struct r8152 *tp)
+ }
+
+ for (i = 0; i < 1000; i++) {
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
++ break;
+ if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0) & TCR0_TX_EMPTY)
+ break;
+ usleep_range(1000, 2000);
+@@ -3623,7 +3629,7 @@ static u16 r8153_phy_status(struct r8152 *tp, u16 desired)
+ }
+
+ msleep(20);
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ break;
+ }
+
+@@ -3655,7 +3661,7 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable)
+ int i;
+
+ for (i = 0; i < 500; i++) {
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return;
+ if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
+ AUTOLOAD_DONE)
+@@ -3697,7 +3703,7 @@ static void r8153c_ups_en(struct r8152 *tp, bool enable)
+ int i;
+
+ for (i = 0; i < 500; i++) {
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return;
+ if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
+ AUTOLOAD_DONE)
+@@ -4062,8 +4068,8 @@ static int rtl_phy_patch_request(struct r8152 *tp, bool request, bool wait)
+ for (i = 0; wait && i < 5000; i++) {
+ u32 ocp_data;
+
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+- break;
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
++ return -ENODEV;
+
+ usleep_range(1000, 2000);
+ ocp_data = ocp_reg_read(tp, OCP_PHY_PATCH_STAT);
+@@ -5381,6 +5387,8 @@ static void wait_oob_link_list_ready(struct r8152 *tp)
+ int i;
+
+ for (i = 0; i < 1000; i++) {
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
++ break;
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ if (ocp_data & LINK_LIST_READY)
+ break;
+@@ -5395,6 +5403,8 @@ static void r8156b_wait_loading_flash(struct r8152 *tp)
+ int i;
+
+ for (i = 0; i < 100; i++) {
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
++ break;
+ if (ocp_read_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL) & GPHY_PATCH_DONE)
+ break;
+ usleep_range(1000, 2000);
+@@ -5517,6 +5527,8 @@ static int r8153_pre_firmware_1(struct r8152 *tp)
+ for (i = 0; i < 104; i++) {
+ u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_WDT1_CTRL);
+
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
++ return -ENODEV;
+ if (!(ocp_data & WTD1_EN))
+ break;
+ usleep_range(1000, 2000);
+@@ -5673,6 +5685,8 @@ static void r8153_aldps_en(struct r8152 *tp, bool enable)
+ data &= ~EN_ALDPS;
+ ocp_reg_write(tp, OCP_POWER_CFG, data);
+ for (i = 0; i < 20; i++) {
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
++ return;
+ usleep_range(1000, 2000);
+ if (ocp_read_word(tp, MCU_TYPE_PLA, 0xe000) & 0x0100)
+ break;
+@@ -6026,7 +6040,7 @@ static int rtl8156_enable(struct r8152 *tp)
+ u32 ocp_data;
+ u16 speed;
+
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return -ENODEV;
+
+ r8156_fc_parameter(tp);
+@@ -6084,7 +6098,7 @@ static int rtl8156b_enable(struct r8152 *tp)
+ u32 ocp_data;
+ u16 speed;
+
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return -ENODEV;
+
+ set_tx_qlen(tp);
+@@ -6270,7 +6284,7 @@ out:
+
+ static void rtl8152_up(struct r8152 *tp)
+ {
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return;
+
+ r8152_aldps_en(tp, false);
+@@ -6280,7 +6294,7 @@ static void rtl8152_up(struct r8152 *tp)
+
+ static void rtl8152_down(struct r8152 *tp)
+ {
+- if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
+ rtl_drop_queued_tx(tp);
+ return;
+ }
+@@ -6295,7 +6309,7 @@ static void rtl8153_up(struct r8152 *tp)
+ {
+ u32 ocp_data;
+
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return;
+
+ r8153_u1u2en(tp, false);
+@@ -6335,7 +6349,7 @@ static void rtl8153_down(struct r8152 *tp)
+ {
+ u32 ocp_data;
+
+- if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
+ rtl_drop_queued_tx(tp);
+ return;
+ }
+@@ -6356,7 +6370,7 @@ static void rtl8153b_up(struct r8152 *tp)
+ {
+ u32 ocp_data;
+
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return;
+
+ r8153b_u1u2en(tp, false);
+@@ -6380,7 +6394,7 @@ static void rtl8153b_down(struct r8152 *tp)
+ {
+ u32 ocp_data;
+
+- if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
+ rtl_drop_queued_tx(tp);
+ return;
+ }
+@@ -6417,7 +6431,7 @@ static void rtl8153c_up(struct r8152 *tp)
+ {
+ u32 ocp_data;
+
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return;
+
+ r8153b_u1u2en(tp, false);
+@@ -6498,7 +6512,7 @@ static void rtl8156_up(struct r8152 *tp)
+ {
+ u32 ocp_data;
+
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return;
+
+ r8153b_u1u2en(tp, false);
+@@ -6571,7 +6585,7 @@ static void rtl8156_down(struct r8152 *tp)
+ {
+ u32 ocp_data;
+
+- if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
+ rtl_drop_queued_tx(tp);
+ return;
+ }
+@@ -6709,7 +6723,7 @@ static void rtl_work_func_t(struct work_struct *work)
+ /* If the device is unplugged or !netif_running(), the workqueue
+ * doesn't need to wake the device, and could return directly.
+ */
+- if (test_bit(RTL8152_UNPLUG, &tp->flags) || !netif_running(tp->netdev))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags) || !netif_running(tp->netdev))
+ return;
+
+ if (usb_autopm_get_interface(tp->intf) < 0)
+@@ -6748,7 +6762,7 @@ static void rtl_hw_phy_work_func_t(struct work_struct *work)
+ {
+ struct r8152 *tp = container_of(work, struct r8152, hw_phy_work.work);
+
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return;
+
+ if (usb_autopm_get_interface(tp->intf) < 0)
+@@ -6875,7 +6889,7 @@ static int rtl8152_close(struct net_device *netdev)
+ netif_stop_queue(netdev);
+
+ res = usb_autopm_get_interface(tp->intf);
+- if (res < 0 || test_bit(RTL8152_UNPLUG, &tp->flags)) {
++ if (res < 0 || test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
+ rtl_drop_queued_tx(tp);
+ rtl_stop_rx(tp);
+ } else {
+@@ -6908,7 +6922,7 @@ static void r8152b_init(struct r8152 *tp)
+ u32 ocp_data;
+ u16 data;
+
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return;
+
+ data = r8152_mdio_read(tp, MII_BMCR);
+@@ -6952,7 +6966,7 @@ static void r8153_init(struct r8152 *tp)
+ u16 data;
+ int i;
+
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return;
+
+ r8153_u1u2en(tp, false);
+@@ -6963,7 +6977,7 @@ static void r8153_init(struct r8152 *tp)
+ break;
+
+ msleep(20);
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ break;
+ }
+
+@@ -7092,7 +7106,7 @@ static void r8153b_init(struct r8152 *tp)
+ u16 data;
+ int i;
+
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return;
+
+ r8153b_u1u2en(tp, false);
+@@ -7103,7 +7117,7 @@ static void r8153b_init(struct r8152 *tp)
+ break;
+
+ msleep(20);
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ break;
+ }
+
+@@ -7174,7 +7188,7 @@ static void r8153c_init(struct r8152 *tp)
+ u16 data;
+ int i;
+
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return;
+
+ r8153b_u1u2en(tp, false);
+@@ -7194,7 +7208,7 @@ static void r8153c_init(struct r8152 *tp)
+ break;
+
+ msleep(20);
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return;
+ }
+
+@@ -8023,7 +8037,7 @@ static void r8156_init(struct r8152 *tp)
+ u16 data;
+ int i;
+
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return;
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_ECM_OP);
+@@ -8044,7 +8058,7 @@ static void r8156_init(struct r8152 *tp)
+ break;
+
+ msleep(20);
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return;
+ }
+
+@@ -8119,7 +8133,7 @@ static void r8156b_init(struct r8152 *tp)
+ u16 data;
+ int i;
+
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return;
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_ECM_OP);
+@@ -8153,7 +8167,7 @@ static void r8156b_init(struct r8152 *tp)
+ break;
+
+ msleep(20);
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return;
+ }
+
+@@ -9219,7 +9233,7 @@ static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
+ struct mii_ioctl_data *data = if_mii(rq);
+ int res;
+
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return -ENODEV;
+
+ res = usb_autopm_get_interface(tp->intf);
+@@ -9321,7 +9335,7 @@ static const struct net_device_ops rtl8152_netdev_ops = {
+
+ static void rtl8152_unload(struct r8152 *tp)
+ {
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return;
+
+ if (tp->version != RTL_VER_01)
+@@ -9330,7 +9344,7 @@ static void rtl8152_unload(struct r8152 *tp)
+
+ static void rtl8153_unload(struct r8152 *tp)
+ {
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return;
+
+ r8153_power_cut_en(tp, false);
+@@ -9338,7 +9352,7 @@ static void rtl8153_unload(struct r8152 *tp)
+
+ static void rtl8153b_unload(struct r8152 *tp)
+ {
+- if (test_bit(RTL8152_UNPLUG, &tp->flags))
++ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return;
+
+ r8153b_power_cut_en(tp, false);
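
[All the r8152 hunks follow one idea: RTL8152_UNPLUG becomes RTL8152_INACCESSIBLE (the device may still be present but unable to answer, e.g. during reset), and every register-polling loop gains an early bail-out on that flag instead of spinning through the full timeout against a dead device. A minimal standalone model of such a bounded poll; the flag and the register read are stubs:]

#include <stdbool.h>
#include <stdio.h>

static bool inaccessible;  /* models test_bit(RTL8152_INACCESSIBLE, ...) */

static bool reset_complete(void)
{
	return false;  /* stub for the PLA_CR register read */
}

/* Poll for completion, but stop as soon as the device stops answering. */
static int wait_for_reset(void)
{
	for (int i = 0; i < 1000; i++) {
		if (inaccessible)
			return -1;       /* device gone: no point polling on */
		if (reset_complete())
			return 0;        /* normal completion */
		/* the real driver sleeps here: usleep_range(100, 400) */
	}
	return -1;                       /* timed out */
}

int main(void)
{
	inaccessible = true;
	printf("wait_for_reset() = %d\n", wait_for_reset());
	return 0;
}
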
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index 69f9e69208f68..118bf08a708b9 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -154,6 +154,11 @@ enum nvme_quirks {
+ * No temperature thresholds for channels other than 0 (Composite).
+ */
+ NVME_QUIRK_NO_SECONDARY_TEMP_THRESH = (1 << 19),
++
++ /*
++ * Disables simple suspend/resume path.
++ */
++ NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND = (1 << 20),
+ };
+
+ /*
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 886c3fc9578e4..3d01290994d89 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3093,6 +3093,18 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
+ if ((dmi_match(DMI_BOARD_VENDOR, "LENOVO")) &&
+ dmi_match(DMI_BOARD_NAME, "LNVNB161216"))
+ return NVME_QUIRK_SIMPLE_SUSPEND;
++ } else if (pdev->vendor == 0x2646 && (pdev->device == 0x2263 ||
++ pdev->device == 0x500f)) {
++ /*
++ * Exclude some Kingston NV1 and A2000 devices from
++ * NVME_QUIRK_SIMPLE_SUSPEND. Do a full suspend to save a
++ * lot of energy with s2idle sleep on some TUXEDO platforms.
++ */
++ if (dmi_match(DMI_BOARD_NAME, "NS5X_NS7XAU") ||
++ dmi_match(DMI_BOARD_NAME, "NS5x_7xAU") ||
++ dmi_match(DMI_BOARD_NAME, "NS5x_7xPU") ||
++ dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1"))
++ return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND;
+ }
+
+ return 0;
+@@ -3133,7 +3145,9 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
+ dev->dev = get_device(&pdev->dev);
+
+ quirks |= check_vendor_combination_bug(pdev);
+- if (!noacpi && acpi_storage_d3(&pdev->dev)) {
++ if (!noacpi &&
++ !(quirks & NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND) &&
++ acpi_storage_d3(&pdev->dev)) {
+ /*
+ * Some systems use a bios work around to ask for D3 on
+ * platforms that support kernel managed suspend.
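
[The new NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND bit works purely by gating the ACPI D3 branch: even when platform firmware requests simple suspend, the quirk forces the full suspend path. A compact sketch of that gating condition, with acpi_storage_d3() replaced by a stub and the surrounding probe logic omitted:]

#include <stdbool.h>
#include <stdio.h>

#define QUIRK_FORCE_NO_SIMPLE_SUSPEND (1ul << 20)

static bool acpi_wants_d3(void)
{
	return true;  /* stub for acpi_storage_d3() */
}

/* The quirk wins over the ACPI hint, mirroring the hunk above. */
static bool use_simple_suspend(unsigned long quirks, bool noacpi)
{
	return !noacpi &&
	       !(quirks & QUIRK_FORCE_NO_SIMPLE_SUSPEND) &&
	       acpi_wants_d3();
}

int main(void)
{
	printf("with quirk: %d\n",
	       use_simple_suspend(QUIRK_FORCE_NO_SIMPLE_SUSPEND, false));
	printf("without:    %d\n", use_simple_suspend(0, false));
	return 0;
}
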
+diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
+index 0fbf331a748fd..9bb9fe0fad07c 100644
+--- a/drivers/of/dynamic.c
++++ b/drivers/of/dynamic.c
+@@ -104,8 +104,9 @@ int of_reconfig_notify(unsigned long action, struct of_reconfig_data *p)
+ *
+ * Returns the new state of a device based on the notifier used.
+ *
+- * Return: 0 on device going from enabled to disabled, 1 on device
+- * going from disabled to enabled and -1 on no change.
++ * Return: OF_RECONFIG_CHANGE_REMOVE on device going from enabled to
++ * disabled, OF_RECONFIG_CHANGE_ADD on device going from disabled to
++ * enabled and OF_RECONFIG_NO_CHANGE on no change.
+ */
+ int of_reconfig_get_state_change(unsigned long action, struct of_reconfig_data *pr)
+ {
+diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
+index 5784dc20fb382..4605758d32146 100644
+--- a/drivers/parport/parport_pc.c
++++ b/drivers/parport/parport_pc.c
+@@ -2614,6 +2614,8 @@ enum parport_pc_pci_cards {
+ netmos_9865,
+ quatech_sppxp100,
+ wch_ch382l,
++ brainboxes_uc146,
++ brainboxes_px203,
+ };
+
+
+@@ -2678,6 +2680,8 @@ static struct parport_pc_pci {
+ /* netmos_9865 */ { 1, { { 0, -1 }, } },
+ /* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
+ /* wch_ch382l */ { 1, { { 2, -1 }, } },
++ /* brainboxes_uc146 */ { 1, { { 3, -1 }, } },
++ /* brainboxes_px203 */ { 1, { { 0, -1 }, } },
+ };
+
+ static const struct pci_device_id parport_pc_pci_tbl[] = {
+@@ -2771,6 +2775,23 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
+ /* WCH CH382L PCI-E single parallel port card */
+ { 0x1c00, 0x3050, 0x1c00, 0x3050, 0, 0, wch_ch382l },
++ /* Brainboxes IX-500/550 */
++ { PCI_VENDOR_ID_INTASHIELD, 0x402a,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
++ /* Brainboxes UC-146/UC-157 */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0be1,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc146 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x0be2,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc146 },
++ /* Brainboxes PX-146/PX-257 */
++ { PCI_VENDOR_ID_INTASHIELD, 0x401c,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
++ /* Brainboxes PX-203 */
++ { PCI_VENDOR_ID_INTASHIELD, 0x4007,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_px203 },
++ /* Brainboxes PX-475 */
++ { PCI_VENDOR_ID_INTASHIELD, 0x401f,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+ { 0, } /* terminate list */
+ };
+ MODULE_DEVICE_TABLE(pci, parport_pc_pci_tbl);
+diff --git a/drivers/platform/mellanox/mlxbf-bootctl.c b/drivers/platform/mellanox/mlxbf-bootctl.c
+index 1c7a288b59a5c..6a171a4f9dc68 100644
+--- a/drivers/platform/mellanox/mlxbf-bootctl.c
++++ b/drivers/platform/mellanox/mlxbf-bootctl.c
+@@ -17,6 +17,7 @@
+
+ #define MLXBF_BOOTCTL_SB_SECURE_MASK 0x03
+ #define MLXBF_BOOTCTL_SB_TEST_MASK 0x0c
++#define MLXBF_BOOTCTL_SB_DEV_MASK BIT(4)
+
+ #define MLXBF_SB_KEY_NUM 4
+
+@@ -37,11 +38,18 @@ static struct mlxbf_bootctl_name boot_names[] = {
+ { MLXBF_BOOTCTL_NONE, "none" },
+ };
+
++enum {
++ MLXBF_BOOTCTL_SB_LIFECYCLE_PRODUCTION = 0,
++ MLXBF_BOOTCTL_SB_LIFECYCLE_GA_SECURE = 1,
++ MLXBF_BOOTCTL_SB_LIFECYCLE_GA_NON_SECURE = 2,
++ MLXBF_BOOTCTL_SB_LIFECYCLE_RMA = 3
++};
++
+ static const char * const mlxbf_bootctl_lifecycle_states[] = {
+- [0] = "Production",
+- [1] = "GA Secured",
+- [2] = "GA Non-Secured",
+- [3] = "RMA",
++ [MLXBF_BOOTCTL_SB_LIFECYCLE_PRODUCTION] = "Production",
++ [MLXBF_BOOTCTL_SB_LIFECYCLE_GA_SECURE] = "GA Secured",
++ [MLXBF_BOOTCTL_SB_LIFECYCLE_GA_NON_SECURE] = "GA Non-Secured",
++ [MLXBF_BOOTCTL_SB_LIFECYCLE_RMA] = "RMA",
+ };
+
+ /* ARM SMC call which is atomic and no need for lock. */
+@@ -165,25 +173,30 @@ static ssize_t second_reset_action_store(struct device *dev,
+ static ssize_t lifecycle_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+ {
++ int status_bits;
++ int use_dev_key;
++ int test_state;
+ int lc_state;
+
+- lc_state = mlxbf_bootctl_smc(MLXBF_BOOTCTL_GET_TBB_FUSE_STATUS,
+- MLXBF_BOOTCTL_FUSE_STATUS_LIFECYCLE);
+- if (lc_state < 0)
+- return lc_state;
++ status_bits = mlxbf_bootctl_smc(MLXBF_BOOTCTL_GET_TBB_FUSE_STATUS,
++ MLXBF_BOOTCTL_FUSE_STATUS_LIFECYCLE);
++ if (status_bits < 0)
++ return status_bits;
+
+- lc_state &=
+- MLXBF_BOOTCTL_SB_TEST_MASK | MLXBF_BOOTCTL_SB_SECURE_MASK;
++ use_dev_key = status_bits & MLXBF_BOOTCTL_SB_DEV_MASK;
++ test_state = status_bits & MLXBF_BOOTCTL_SB_TEST_MASK;
++ lc_state = status_bits & MLXBF_BOOTCTL_SB_SECURE_MASK;
+
+ /*
+ * If the test bits are set, we specify that the current state may be
+ * due to using the test bits.
+ */
+- if (lc_state & MLXBF_BOOTCTL_SB_TEST_MASK) {
+- lc_state &= MLXBF_BOOTCTL_SB_SECURE_MASK;
+-
++ if (test_state) {
+ return sprintf(buf, "%s(test)\n",
+ mlxbf_bootctl_lifecycle_states[lc_state]);
++ } else if (use_dev_key &&
++ (lc_state == MLXBF_BOOTCTL_SB_LIFECYCLE_GA_SECURE)) {
++ return sprintf(buf, "Secured (development)\n");
+ }
+
+ return sprintf(buf, "%s\n", mlxbf_bootctl_lifecycle_states[lc_state]);
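
[lifecycle_state_show() now decodes the raw fuse word into three separate fields (dev-key bit, test bits, secure state) before indexing the name table, rather than masking the value in place; that is what lets it report the new "Secured (development)" case. A standalone sketch of the decode using the same masks as the hunk, with the sysfs plumbing dropped:]

#include <stdio.h>

#define SB_SECURE_MASK 0x03
#define SB_TEST_MASK   0x0c
#define SB_DEV_MASK    (1 << 4)

enum { LC_PRODUCTION, LC_GA_SECURE, LC_GA_NON_SECURE, LC_RMA };

static const char * const lifecycle[] = {
	[LC_PRODUCTION]    = "Production",
	[LC_GA_SECURE]     = "GA Secured",
	[LC_GA_NON_SECURE] = "GA Non-Secured",
	[LC_RMA]           = "RMA",
};

static void show_lifecycle(int status_bits)
{
	int use_dev_key = status_bits & SB_DEV_MASK;
	int test_state  = status_bits & SB_TEST_MASK;
	int lc_state    = status_bits & SB_SECURE_MASK;

	if (test_state)
		printf("%s(test)\n", lifecycle[lc_state]);
	else if (use_dev_key && lc_state == LC_GA_SECURE)
		printf("Secured (development)\n");
	else
		printf("%s\n", lifecycle[lc_state]);
}

int main(void)
{
	show_lifecycle(SB_DEV_MASK | LC_GA_SECURE);  /* dev key + GA Secured */
	return 0;
}
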
+diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
+index 2d4bbe99959ef..db7a1d360cd2c 100644
+--- a/drivers/platform/mellanox/mlxbf-pmc.c
++++ b/drivers/platform/mellanox/mlxbf-pmc.c
+@@ -1202,6 +1202,8 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
+ attr->dev_attr.show = mlxbf_pmc_event_list_show;
+ attr->nr = blk_num;
+ attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL, "event_list");
++ if (!attr->dev_attr.attr.name)
++ return -ENOMEM;
+ pmc->block[blk_num].block_attr[i] = &attr->dev_attr.attr;
+ attr = NULL;
+
+@@ -1214,6 +1216,8 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
+ attr->nr = blk_num;
+ attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "enable");
++ if (!attr->dev_attr.attr.name)
++ return -ENOMEM;
+ pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
+ attr = NULL;
+ }
+@@ -1240,6 +1244,8 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
+ attr->nr = blk_num;
+ attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "counter%d", j);
++ if (!attr->dev_attr.attr.name)
++ return -ENOMEM;
+ pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
+ attr = NULL;
+
+@@ -1251,6 +1257,8 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
+ attr->nr = blk_num;
+ attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "event%d", j);
++ if (!attr->dev_attr.attr.name)
++ return -ENOMEM;
+ pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
+ attr = NULL;
+ }
+@@ -1283,6 +1291,8 @@ static int mlxbf_pmc_init_perftype_reg(struct device *dev, int blk_num)
+ attr->nr = blk_num;
+ attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ events[j].evt_name);
++ if (!attr->dev_attr.attr.name)
++ return -ENOMEM;
+ pmc->block[blk_num].block_attr[i] = &attr->dev_attr.attr;
+ attr = NULL;
+ i++;
+@@ -1311,6 +1321,8 @@ static int mlxbf_pmc_create_groups(struct device *dev, int blk_num)
+ pmc->block[blk_num].block_attr_grp.attrs = pmc->block[blk_num].block_attr;
+ pmc->block[blk_num].block_attr_grp.name = devm_kasprintf(
+ dev, GFP_KERNEL, pmc->block_name[blk_num]);
++ if (!pmc->block[blk_num].block_attr_grp.name)
++ return -ENOMEM;
+ pmc->groups[blk_num] = &pmc->block[blk_num].block_attr_grp;
+
+ return 0;
+@@ -1442,6 +1454,8 @@ static int mlxbf_pmc_probe(struct platform_device *pdev)
+
+ pmc->hwmon_dev = devm_hwmon_device_register_with_groups(
+ dev, "bfperf", pmc, pmc->groups);
++ if (IS_ERR(pmc->hwmon_dev))
++ return PTR_ERR(pmc->hwmon_dev);
+ platform_set_drvdata(pdev, pmc);
+
+ return 0;
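
[Each of the mlxbf-pmc hunks adds the same missing check: devm_kasprintf() can return NULL under memory pressure, and registering an attribute whose name is NULL would fault later, so probe must fail with -ENOMEM instead. A tiny userspace model of the pattern, with asprintf() standing in for devm_kasprintf():]

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Build an attribute name, propagating allocation failure to the caller
 * instead of leaving a NULL name behind (the bug the hunks fix). */
static int make_attr_name(char **name, int idx)
{
	if (asprintf(name, "counter%d", idx) < 0) {
		*name = NULL;
		return -ENOMEM;
	}
	return 0;
}

int main(void)
{
	char *name;

	if (make_attr_name(&name, 3) == 0) {
		puts(name);
		free(name);
	}
	return 0;
}
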
+diff --git a/drivers/platform/surface/aggregator/core.c b/drivers/platform/surface/aggregator/core.c
+index 1a6373dea109c..6152be38398c4 100644
+--- a/drivers/platform/surface/aggregator/core.c
++++ b/drivers/platform/surface/aggregator/core.c
+@@ -231,9 +231,12 @@ static int ssam_receive_buf(struct serdev_device *dev, const unsigned char *buf,
+ size_t n)
+ {
+ struct ssam_controller *ctrl;
++ int ret;
+
+ ctrl = serdev_device_get_drvdata(dev);
+- return ssam_controller_receive_buf(ctrl, buf, n);
++ ret = ssam_controller_receive_buf(ctrl, buf, n);
++
++ return ret < 0 ? 0 : ret;
+ }
+
+ static void ssam_write_wakeup(struct serdev_device *dev)
+diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
+index 1396a839dd8a4..d5acef3202dad 100644
+--- a/drivers/platform/x86/Kconfig
++++ b/drivers/platform/x86/Kconfig
+@@ -271,6 +271,7 @@ config ASUS_WMI
+ depends on RFKILL || RFKILL = n
+ depends on HOTPLUG_PCI
+ depends on ACPI_VIDEO || ACPI_VIDEO = n
++ depends on SERIO_I8042 || SERIO_I8042 = n
+ select INPUT_SPARSEKMAP
+ select LEDS_CLASS
+ select NEW_LEDS
+@@ -287,7 +288,6 @@ config ASUS_WMI
+ config ASUS_NB_WMI
+ tristate "Asus Notebook WMI Driver"
+ depends on ASUS_WMI
+- depends on SERIO_I8042 || SERIO_I8042 = n
+ help
+ This is a driver for newer Asus notebooks. It adds extra features
+ like wireless radio and bluetooth control, leds, hotkeys, backlight...
+diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
+index df1db54d4e183..af3da303e2b15 100644
+--- a/drivers/platform/x86/asus-nb-wmi.c
++++ b/drivers/platform/x86/asus-nb-wmi.c
+@@ -501,8 +501,6 @@ static const struct dmi_system_id asus_quirks[] = {
+
+ static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
+ {
+- int ret;
+-
+ quirks = &quirk_asus_unknown;
+ dmi_check_system(asus_quirks);
+
+@@ -517,15 +515,6 @@ static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
+
+ if (tablet_mode_sw != -1)
+ quirks->tablet_switch_mode = tablet_mode_sw;
+-
+- if (quirks->i8042_filter) {
+- ret = i8042_install_filter(quirks->i8042_filter);
+- if (ret) {
+- pr_warn("Unable to install key filter\n");
+- return;
+- }
+- pr_info("Using i8042 filter function for receiving events\n");
+- }
+ }
+
+ static const struct key_entry asus_nb_wmi_keymap[] = {
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index 49dd55b8e8faf..296150eaef929 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -3839,6 +3839,12 @@ static int asus_wmi_add(struct platform_device *pdev)
+ goto fail_wmi_handler;
+ }
+
++ if (asus->driver->quirks->i8042_filter) {
++ err = i8042_install_filter(asus->driver->quirks->i8042_filter);
++ if (err)
++ pr_warn("Unable to install key filter - %d\n", err);
++ }
++
+ asus_wmi_battery_init(asus);
+
+ asus_wmi_debugfs_init(asus);
+@@ -3873,6 +3879,8 @@ static int asus_wmi_remove(struct platform_device *device)
+ struct asus_wmi *asus;
+
+ asus = platform_get_drvdata(device);
++ if (asus->driver->quirks->i8042_filter)
++ i8042_remove_filter(asus->driver->quirks->i8042_filter);
+ wmi_remove_notify_handler(asus->driver->event_guid);
+ asus_wmi_backlight_exit(asus);
+ asus_wmi_input_exit(asus);
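
[Taken together, the two ASUS hunks move i8042 filter installation out of the quirk-parsing helper, which runs before the driver is fully set up, into asus_wmi_add(), and pair it with i8042_remove_filter() in asus_wmi_remove(), so the filter cannot outlive the device it forwards events to. A minimal sketch of that symmetric install/remove lifecycle; the filter registry below is a simple stand-in, not the real i8042 API:]

#include <stdbool.h>
#include <stdio.h>

typedef bool (*filter_fn)(int scancode);

static filter_fn installed;  /* the i8042 core allows one filter at a time */

static int install_filter(filter_fn f)
{
	if (installed)
		return -1;
	installed = f;
	return 0;
}

static void remove_filter(filter_fn f)
{
	if (installed == f)
		installed = NULL;
}

static bool my_filter(int code)
{
	return code == 0x38;  /* swallow one illustrative scancode */
}

int main(void)
{
	/* probe: install and warn on failure, as asus_wmi_add() now does */
	if (install_filter(my_filter))
		fprintf(stderr, "Unable to install key filter\n");

	/* ... device lifetime ... */

	/* remove: tear down symmetrically, as asus_wmi_remove() now does */
	remove_filter(my_filter);
	return 0;
}
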
+diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
+index 2b79377cc21e2..b3f3e23a64eee 100644
+--- a/drivers/platform/x86/wmi.c
++++ b/drivers/platform/x86/wmi.c
+@@ -1227,6 +1227,11 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
+ if (debug_dump_wdg)
+ wmi_dump_wdg(&gblock[i]);
+
++ if (!gblock[i].instance_count) {
++ dev_info(wmi_bus_dev, FW_INFO "%pUL has zero instances\n", &gblock[i].guid);
++ continue;
++ }
++
+ if (guid_already_parsed_for_legacy(device, &gblock[i].guid))
+ continue;
+
+diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c
+index 8a2f18fa3faf5..9193c3b8edebe 100644
+--- a/drivers/powercap/dtpm_cpu.c
++++ b/drivers/powercap/dtpm_cpu.c
+@@ -140,6 +140,8 @@ static void pd_release(struct dtpm *dtpm)
+ if (policy) {
+ for_each_cpu(dtpm_cpu->cpu, policy->related_cpus)
+ per_cpu(dtpm_per_cpu, dtpm_cpu->cpu) = NULL;
++
++ cpufreq_cpu_put(policy);
+ }
+
+ kfree(dtpm_cpu);
+@@ -191,12 +193,16 @@ static int __dtpm_cpu_setup(int cpu, struct dtpm *parent)
+ return 0;
+
+ pd = em_cpu_get(cpu);
+- if (!pd || em_is_artificial(pd))
+- return -EINVAL;
++ if (!pd || em_is_artificial(pd)) {
++ ret = -EINVAL;
++ goto release_policy;
++ }
+
+ dtpm_cpu = kzalloc(sizeof(*dtpm_cpu), GFP_KERNEL);
+- if (!dtpm_cpu)
+- return -ENOMEM;
++ if (!dtpm_cpu) {
++ ret = -ENOMEM;
++ goto release_policy;
++ }
+
+ dtpm_init(&dtpm_cpu->dtpm, &dtpm_ops);
+ dtpm_cpu->cpu = cpu;
+@@ -216,6 +222,7 @@ static int __dtpm_cpu_setup(int cpu, struct dtpm *parent)
+ if (ret)
+ goto out_dtpm_unregister;
+
++ cpufreq_cpu_put(policy);
+ return 0;
+
+ out_dtpm_unregister:
+@@ -227,6 +234,8 @@ out_kfree_dtpm_cpu:
+ per_cpu(dtpm_per_cpu, cpu) = NULL;
+ kfree(dtpm_cpu);
+
++release_policy:
++ cpufreq_cpu_put(policy);
+ return ret;
+ }
+
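
[The dtpm_cpu fix is a reference-counting one: the policy is taken with a reference (presumably via cpufreq_cpu_get() earlier in __dtpm_cpu_setup(), outside this hunk's context), so every exit path, success included, must now drop it, which the patch arranges with a release_policy label. A standalone sketch of the get/put unwind pattern, using a plain counter as the refcount:]

#include <errno.h>
#include <stdio.h>

static int policy_refs;  /* models the cpufreq policy's refcount */

static int policy_get(void)
{
	policy_refs++;
	return 0;
}

static void policy_put(void)
{
	policy_refs--;
}

static int setup(int fail)
{
	int ret = 0;

	if (policy_get())
		return -EINVAL;

	if (fail) {
		ret = -EINVAL;
		goto release_policy;  /* error paths drop the reference */
	}

	policy_put();                 /* ... and so does the success path */
	return 0;

release_policy:
	policy_put();
	return ret;
}

int main(void)
{
	setup(1);
	setup(0);
	printf("leaked references: %d\n", policy_refs);  /* expect 0 */
	return 0;
}
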
+diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
+index 50a577ac3bb42..b6df0d4319072 100644
+--- a/drivers/scsi/be2iscsi/be_main.c
++++ b/drivers/scsi/be2iscsi/be_main.c
+@@ -2710,6 +2710,7 @@ init_wrb_hndl_failed:
+ kfree(pwrb_context->pwrb_handle_base);
+ kfree(pwrb_context->pwrb_handle_basestd);
+ }
++ kfree(phwi_ctxt->be_wrbq);
+ return -ENOMEM;
+ }
+
+diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c
+index 64f0e047c23d2..4b10921276942 100644
+--- a/drivers/tee/optee/device.c
++++ b/drivers/tee/optee/device.c
+@@ -60,7 +60,16 @@ static void optee_release_device(struct device *dev)
+ kfree(optee_device);
+ }
+
+-static int optee_register_device(const uuid_t *device_uuid)
++static ssize_t need_supplicant_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ return 0;
++}
++
++static DEVICE_ATTR_RO(need_supplicant);
++
++static int optee_register_device(const uuid_t *device_uuid, u32 func)
+ {
+ struct tee_client_device *optee_device = NULL;
+ int rc;
+@@ -83,6 +92,10 @@ static int optee_register_device(const uuid_t *device_uuid)
+ put_device(&optee_device->dev);
+ }
+
++ if (func == PTA_CMD_GET_DEVICES_SUPP)
++ device_create_file(&optee_device->dev,
++ &dev_attr_need_supplicant);
++
+ return rc;
+ }
+
+@@ -142,7 +155,7 @@ static int __optee_enumerate_devices(u32 func)
+ num_devices = shm_size / sizeof(uuid_t);
+
+ for (idx = 0; idx < num_devices; idx++) {
+- rc = optee_register_device(&device_uuid[idx]);
++ rc = optee_register_device(&device_uuid[idx], func);
+ if (rc)
+ goto out_shm;
+ }
+diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
+index 7db51781289ed..88035100b86c6 100644
+--- a/drivers/tty/serial/8250/8250_dw.c
++++ b/drivers/tty/serial/8250/8250_dw.c
+@@ -795,6 +795,7 @@ static const struct acpi_device_id dw8250_acpi_match[] = {
+ { "INT33C5", (kernel_ulong_t)&dw8250_dw_apb },
+ { "INT3434", (kernel_ulong_t)&dw8250_dw_apb },
+ { "INT3435", (kernel_ulong_t)&dw8250_dw_apb },
++ { "INTC10EE", (kernel_ulong_t)&dw8250_dw_apb },
+ { },
+ };
+ MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
+diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
+index f271becfc46c1..02c9b98a6bbf1 100644
+--- a/drivers/tty/serial/8250/8250_early.c
++++ b/drivers/tty/serial/8250/8250_early.c
+@@ -197,6 +197,7 @@ static int __init early_omap8250_setup(struct earlycon_device *device,
+ OF_EARLYCON_DECLARE(omap8250, "ti,omap2-uart", early_omap8250_setup);
+ OF_EARLYCON_DECLARE(omap8250, "ti,omap3-uart", early_omap8250_setup);
+ OF_EARLYCON_DECLARE(omap8250, "ti,omap4-uart", early_omap8250_setup);
++OF_EARLYCON_DECLARE(omap8250, "ti,am654-uart", early_omap8250_setup);
+
+ #endif
+
+diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
+index 2e21f74a24705..0b04d810b3e61 100644
+--- a/drivers/tty/serial/8250/8250_omap.c
++++ b/drivers/tty/serial/8250/8250_omap.c
+@@ -825,7 +825,7 @@ static void __dma_rx_do_complete(struct uart_8250_port *p)
+ if (priv->habit & UART_HAS_RHR_IT_DIS) {
+ reg = serial_in(p, UART_OMAP_IER2);
+ reg &= ~UART_OMAP_IER2_RHR_IT_DIS;
+- serial_out(p, UART_OMAP_IER2, UART_OMAP_IER2_RHR_IT_DIS);
++ serial_out(p, UART_OMAP_IER2, reg);
+ }
+
+ dmaengine_tx_status(rxchan, cookie, &state);
+@@ -967,7 +967,7 @@ static int omap_8250_rx_dma(struct uart_8250_port *p)
+ if (priv->habit & UART_HAS_RHR_IT_DIS) {
+ reg = serial_in(p, UART_OMAP_IER2);
+ reg |= UART_OMAP_IER2_RHR_IT_DIS;
+- serial_out(p, UART_OMAP_IER2, UART_OMAP_IER2_RHR_IT_DIS);
++ serial_out(p, UART_OMAP_IER2, reg);
+ }
+
+ dma_async_issue_pending(dma->rxchan);
+@@ -1186,10 +1186,12 @@ static int omap_8250_dma_handle_irq(struct uart_port *port)
+
+ status = serial_port_in(port, UART_LSR);
+
+- if (priv->habit & UART_HAS_EFR2)
+- am654_8250_handle_rx_dma(up, iir, status);
+- else
+- status = omap_8250_handle_rx_dma(up, iir, status);
++ if ((iir & 0x3f) != UART_IIR_THRI) {
++ if (priv->habit & UART_HAS_EFR2)
++ am654_8250_handle_rx_dma(up, iir, status);
++ else
++ status = omap_8250_handle_rx_dma(up, iir, status);
++ }
+
+ serial8250_modem_status(up);
+ if (status & UART_LSR_THRE && up->dma->tx_err) {
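
[Both omap read-modify-write hunks fix the same copy-paste slip: the code read UART_OMAP_IER2 into reg, updated the local copy, and then wrote the bare bit constant instead of reg, wiping every other bit in the register. A short sketch of the corrected sequence; the register is modeled as a variable and the bit position is illustrative:]

#include <stdint.h>
#include <stdio.h>

#define RHR_IT_DIS (1u << 2)  /* illustrative bit position */

static uint32_t ier2 = 0xf0;  /* pretend other bits are already set */

static void rmw_set(uint32_t bit)
{
	uint32_t reg = ier2;  /* read */

	reg |= bit;           /* modify */
	ier2 = reg;           /* write the updated value -- the bug wrote
	                       * the bare constant here instead of reg */
}

int main(void)
{
	rmw_set(RHR_IT_DIS);
	printf("ier2 = 0x%x (other bits preserved)\n", (unsigned)ier2);
	return 0;
}
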
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index 0a1cc36f93aa7..c74eaf2552c32 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -218,17 +218,18 @@ static struct vendor_data vendor_st = {
+
+ /* Deals with DMA transactions */
+
+-struct pl011_sgbuf {
+- struct scatterlist sg;
+- char *buf;
++struct pl011_dmabuf {
++ dma_addr_t dma;
++ size_t len;
++ char *buf;
+ };
+
+ struct pl011_dmarx_data {
+ struct dma_chan *chan;
+ struct completion complete;
+ bool use_buf_b;
+- struct pl011_sgbuf sgbuf_a;
+- struct pl011_sgbuf sgbuf_b;
++ struct pl011_dmabuf dbuf_a;
++ struct pl011_dmabuf dbuf_b;
+ dma_cookie_t cookie;
+ bool running;
+ struct timer_list timer;
+@@ -241,7 +242,8 @@ struct pl011_dmarx_data {
+
+ struct pl011_dmatx_data {
+ struct dma_chan *chan;
+- struct scatterlist sg;
++ dma_addr_t dma;
++ size_t len;
+ char *buf;
+ bool queued;
+ };
+@@ -365,32 +367,24 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap)
+
+ #define PL011_DMA_BUFFER_SIZE PAGE_SIZE
+
+-static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
++static int pl011_dmabuf_init(struct dma_chan *chan, struct pl011_dmabuf *db,
+ enum dma_data_direction dir)
+ {
+- dma_addr_t dma_addr;
+-
+- sg->buf = dma_alloc_coherent(chan->device->dev,
+- PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
+- if (!sg->buf)
++ db->buf = dma_alloc_coherent(chan->device->dev, PL011_DMA_BUFFER_SIZE,
++ &db->dma, GFP_KERNEL);
++ if (!db->buf)
+ return -ENOMEM;
+-
+- sg_init_table(&sg->sg, 1);
+- sg_set_page(&sg->sg, phys_to_page(dma_addr),
+- PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
+- sg_dma_address(&sg->sg) = dma_addr;
+- sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;
++ db->len = PL011_DMA_BUFFER_SIZE;
+
+ return 0;
+ }
+
+-static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
++static void pl011_dmabuf_free(struct dma_chan *chan, struct pl011_dmabuf *db,
+ enum dma_data_direction dir)
+ {
+- if (sg->buf) {
++ if (db->buf) {
+ dma_free_coherent(chan->device->dev,
+- PL011_DMA_BUFFER_SIZE, sg->buf,
+- sg_dma_address(&sg->sg));
++ PL011_DMA_BUFFER_SIZE, db->buf, db->dma);
+ }
+ }
+
+@@ -551,8 +545,8 @@ static void pl011_dma_tx_callback(void *data)
+
+ spin_lock_irqsave(&uap->port.lock, flags);
+ if (uap->dmatx.queued)
+- dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
+- DMA_TO_DEVICE);
++ dma_unmap_single(dmatx->chan->device->dev, dmatx->dma,
++ dmatx->len, DMA_TO_DEVICE);
+
+ dmacr = uap->dmacr;
+ uap->dmacr = dmacr & ~UART011_TXDMAE;
+@@ -638,18 +632,19 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
+ memcpy(&dmatx->buf[first], &xmit->buf[0], second);
+ }
+
+- dmatx->sg.length = count;
+-
+- if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
++ dmatx->len = count;
++ dmatx->dma = dma_map_single(dma_dev->dev, dmatx->buf, count,
++ DMA_TO_DEVICE);
++ if (dmatx->dma == DMA_MAPPING_ERROR) {
+ uap->dmatx.queued = false;
+ dev_dbg(uap->port.dev, "unable to map TX DMA\n");
+ return -EBUSY;
+ }
+
+- desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
++ desc = dmaengine_prep_slave_single(chan, dmatx->dma, dmatx->len, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+- dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
++ dma_unmap_single(dma_dev->dev, dmatx->dma, dmatx->len, DMA_TO_DEVICE);
+ uap->dmatx.queued = false;
+ /*
+ * If DMA cannot be used right now, we complete this
+@@ -813,8 +808,8 @@ __acquires(&uap->port.lock)
+ dmaengine_terminate_async(uap->dmatx.chan);
+
+ if (uap->dmatx.queued) {
+- dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
+- DMA_TO_DEVICE);
++ dma_unmap_single(uap->dmatx.chan->device->dev, uap->dmatx.dma,
++ uap->dmatx.len, DMA_TO_DEVICE);
+ uap->dmatx.queued = false;
+ uap->dmacr &= ~UART011_TXDMAE;
+ pl011_write(uap->dmacr, uap, REG_DMACR);
+@@ -828,15 +823,15 @@ static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
+ struct dma_chan *rxchan = uap->dmarx.chan;
+ struct pl011_dmarx_data *dmarx = &uap->dmarx;
+ struct dma_async_tx_descriptor *desc;
+- struct pl011_sgbuf *sgbuf;
++ struct pl011_dmabuf *dbuf;
+
+ if (!rxchan)
+ return -EIO;
+
+ /* Start the RX DMA job */
+- sgbuf = uap->dmarx.use_buf_b ?
+- &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+- desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
++ dbuf = uap->dmarx.use_buf_b ?
++ &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
++ desc = dmaengine_prep_slave_single(rxchan, dbuf->dma, dbuf->len,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ /*
+@@ -876,8 +871,8 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
+ bool readfifo)
+ {
+ struct tty_port *port = &uap->port.state->port;
+- struct pl011_sgbuf *sgbuf = use_buf_b ?
+- &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
++ struct pl011_dmabuf *dbuf = use_buf_b ?
++ &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
+ int dma_count = 0;
+ u32 fifotaken = 0; /* only used for vdbg() */
+
+@@ -886,7 +881,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
+
+ if (uap->dmarx.poll_rate) {
+ /* The data can be taken by polling */
+- dmataken = sgbuf->sg.length - dmarx->last_residue;
++ dmataken = dbuf->len - dmarx->last_residue;
+ /* Recalculate the pending size */
+ if (pending >= dmataken)
+ pending -= dmataken;
+@@ -900,7 +895,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
+ * Note that tty_insert_flip_buf() tries to take as many chars
+ * as it can.
+ */
+- dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
++ dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
+ pending);
+
+ uap->port.icount.rx += dma_count;
+@@ -911,7 +906,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
+
+ /* Reset the last_residue for Rx DMA poll */
+ if (uap->dmarx.poll_rate)
+- dmarx->last_residue = sgbuf->sg.length;
++ dmarx->last_residue = dbuf->len;
+
+ /*
+ * Only continue with trying to read the FIFO if all DMA chars have
+@@ -946,8 +941,8 @@ static void pl011_dma_rx_irq(struct uart_amba_port *uap)
+ {
+ struct pl011_dmarx_data *dmarx = &uap->dmarx;
+ struct dma_chan *rxchan = dmarx->chan;
+- struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
+- &dmarx->sgbuf_b : &dmarx->sgbuf_a;
++ struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
++ &dmarx->dbuf_b : &dmarx->dbuf_a;
+ size_t pending;
+ struct dma_tx_state state;
+ enum dma_status dmastat;
+@@ -969,7 +964,7 @@ static void pl011_dma_rx_irq(struct uart_amba_port *uap)
+ pl011_write(uap->dmacr, uap, REG_DMACR);
+ uap->dmarx.running = false;
+
+- pending = sgbuf->sg.length - state.residue;
++ pending = dbuf->len - state.residue;
+ BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
+ /* Then we terminate the transfer - we now know our residue */
+ dmaengine_terminate_all(rxchan);
+@@ -996,8 +991,8 @@ static void pl011_dma_rx_callback(void *data)
+ struct pl011_dmarx_data *dmarx = &uap->dmarx;
+ struct dma_chan *rxchan = dmarx->chan;
+ bool lastbuf = dmarx->use_buf_b;
+- struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
+- &dmarx->sgbuf_b : &dmarx->sgbuf_a;
++ struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
++ &dmarx->dbuf_b : &dmarx->dbuf_a;
+ size_t pending;
+ struct dma_tx_state state;
+ int ret;
+@@ -1015,7 +1010,7 @@ static void pl011_dma_rx_callback(void *data)
+ * the DMA irq handler. So we check the residue here.
+ */
+ rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
+- pending = sgbuf->sg.length - state.residue;
++ pending = dbuf->len - state.residue;
+ BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
+ /* Then we terminate the transfer - we now know our residue */
+ dmaengine_terminate_all(rxchan);
+@@ -1067,16 +1062,16 @@ static void pl011_dma_rx_poll(struct timer_list *t)
+ unsigned long flags;
+ unsigned int dmataken = 0;
+ unsigned int size = 0;
+- struct pl011_sgbuf *sgbuf;
++ struct pl011_dmabuf *dbuf;
+ int dma_count;
+ struct dma_tx_state state;
+
+- sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
++ dbuf = dmarx->use_buf_b ? &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
+ rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
+ if (likely(state.residue < dmarx->last_residue)) {
+- dmataken = sgbuf->sg.length - dmarx->last_residue;
++ dmataken = dbuf->len - dmarx->last_residue;
+ size = dmarx->last_residue - state.residue;
+- dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
++ dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
+ size);
+ if (dma_count == size)
+ dmarx->last_residue = state.residue;
+@@ -1123,7 +1118,7 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
+ return;
+ }
+
+- sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);
++ uap->dmatx.len = PL011_DMA_BUFFER_SIZE;
+
+ /* The DMA buffer is now the FIFO the TTY subsystem can use */
+ uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
+@@ -1133,7 +1128,7 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
+ goto skip_rx;
+
+ /* Allocate and map DMA RX buffers */
+- ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
++ ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_a,
+ DMA_FROM_DEVICE);
+ if (ret) {
+ dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
+@@ -1141,12 +1136,12 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
+ goto skip_rx;
+ }
+
+- ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
++ ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_b,
+ DMA_FROM_DEVICE);
+ if (ret) {
+ dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
+ "RX buffer B", ret);
+- pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
++ pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a,
+ DMA_FROM_DEVICE);
+ goto skip_rx;
+ }
+@@ -1200,8 +1195,9 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
+ /* In theory, this should already be done by pl011_dma_flush_buffer */
+ dmaengine_terminate_all(uap->dmatx.chan);
+ if (uap->dmatx.queued) {
+- dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
+- DMA_TO_DEVICE);
++ dma_unmap_single(uap->dmatx.chan->device->dev,
++ uap->dmatx.dma, uap->dmatx.len,
++ DMA_TO_DEVICE);
+ uap->dmatx.queued = false;
+ }
+
+@@ -1212,8 +1208,8 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
+ if (uap->using_rx_dma) {
+ dmaengine_terminate_all(uap->dmarx.chan);
+ /* Clean up the RX DMA */
+- pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
+- pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
++ pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a, DMA_FROM_DEVICE);
++ pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_b, DMA_FROM_DEVICE);
+ if (uap->dmarx.poll_rate)
+ del_timer_sync(&uap->dmarx.timer);
+ uap->using_rx_dma = false;
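
[The pl011 conversion replaces single-entry scatterlists with a bare (dma_addr_t, length) pair, so the TX mapping now goes through dma_map_single(); note that failure is detected by comparing against DMA_MAPPING_ERROR rather than checking that dma_map_sg() returned 1. A standalone model of just that mapping-and-check step; dma_addr_t and the mapping call here are stand-ins, not the kernel API:]

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;               /* stand-in type */
#define DMA_MAPPING_ERROR ((dma_addr_t)~0ull)

/* Stand-in for dma_map_single(); a real mapping may fail. */
static dma_addr_t map_single(void *buf, size_t len)
{
	(void)buf;
	(void)len;
	return 0x1000;  /* pretend the buffer mapped at this bus address */
}

struct dmatx {
	dma_addr_t dma;
	size_t len;
	char buf[64];
};

static int tx_refill(struct dmatx *tx, size_t count)
{
	tx->len = count;
	tx->dma = map_single(tx->buf, count);
	if (tx->dma == DMA_MAPPING_ERROR)  /* not "!= 1" as with sg lists */
		return -1;
	return 0;
}

int main(void)
{
	struct dmatx tx;

	printf("tx_refill() = %d\n", tx_refill(&tx, 32));
	return 0;
}
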
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index b398fba942964..b4b849415c503 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -769,6 +769,18 @@ static bool sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
+ case SC16IS7XX_IIR_RTOI_SRC:
+ case SC16IS7XX_IIR_XOFFI_SRC:
+ rxlen = sc16is7xx_port_read(port, SC16IS7XX_RXLVL_REG);
++
++ /*
++ * There is a silicon bug that makes the chip report a
++ * time-out interrupt but no data in the FIFO. This is
++ * described in errata section 18.1.4.
++ *
++ * When this happens, read one byte from the FIFO to
++ * clear the interrupt.
++ */
++ if (iir == SC16IS7XX_IIR_RTOI_SRC && !rxlen)
++ rxlen = 1;
++
+ if (rxlen)
+ sc16is7xx_handle_rx(port, rxlen, iir);
+ break;
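
[The sc16is7xx workaround leans on a hardware side effect: per errata section 18.1.4 the chip can raise a receive-timeout interrupt with an empty FIFO, and the only way to deassert it is to read a byte anyway, so the handler forces rxlen to 1 in that case. A compact sketch of the decision, with the register reads stubbed out and an illustrative IIR source value:]

#include <stdio.h>

#define IIR_RTOI_SRC 0x0c  /* illustrative receive-timeout source code */

static int read_rxlvl(void)
{
	return 0;  /* stub: the FIFO level register claims empty */
}

static int bytes_to_drain(int iir)
{
	int rxlen = read_rxlvl();

	/* Errata workaround: a timeout IRQ with an empty FIFO still needs
	 * one dummy read to deassert the interrupt line. */
	if (iir == IIR_RTOI_SRC && !rxlen)
		rxlen = 1;

	return rxlen;
}

int main(void)
{
	printf("drain %d byte(s)\n", bytes_to_drain(IIR_RTOI_SRC));
	return 0;
}
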
+diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
+index 6be6009f911e1..f1ca9250cad96 100644
+--- a/drivers/usb/gadget/function/f_hid.c
++++ b/drivers/usb/gadget/function/f_hid.c
+@@ -88,6 +88,7 @@ static void hidg_release(struct device *dev)
+ {
+ struct f_hidg *hidg = container_of(dev, struct f_hidg, dev);
+
++ kfree(hidg->report_desc);
+ kfree(hidg->set_report_buf);
+ kfree(hidg);
+ }
+@@ -1287,9 +1288,9 @@ static struct usb_function *hidg_alloc(struct usb_function_instance *fi)
+ hidg->report_length = opts->report_length;
+ hidg->report_desc_length = opts->report_desc_length;
+ if (opts->report_desc) {
+- hidg->report_desc = devm_kmemdup(&hidg->dev, opts->report_desc,
+- opts->report_desc_length,
+- GFP_KERNEL);
++ hidg->report_desc = kmemdup(opts->report_desc,
++ opts->report_desc_length,
++ GFP_KERNEL);
+ if (!hidg->report_desc) {
+ put_device(&hidg->dev);
+ --opts->refcnt;
+diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
+index 1c0c61e8ba696..c40f2ecbe1b8c 100644
+--- a/drivers/usb/gadget/udc/core.c
++++ b/drivers/usb/gadget/udc/core.c
+@@ -1608,8 +1608,6 @@ static void gadget_unbind_driver(struct device *dev)
+
+ dev_dbg(&udc->dev, "unbinding gadget driver [%s]\n", driver->function);
+
+- kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
+-
+ udc->allow_connect = false;
+ cancel_work_sync(&udc->vbus_work);
+ mutex_lock(&udc->connect_lock);
+@@ -1629,6 +1627,8 @@ static void gadget_unbind_driver(struct device *dev)
+ driver->is_bound = false;
+ udc->driver = NULL;
+ mutex_unlock(&udc_lock);
++
++ kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
+ }
+
+ /* ------------------------------------------------------------------------- */
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 24bcf6ab12d8a..e02ef31da68e4 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -348,8 +348,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ /* xHC spec requires PCI devices to support D3hot and D3cold */
+ if (xhci->hci_version >= 0x120)
+ xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
+- else if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version >= 0x110)
+- xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
+
+ if (xhci->quirks & XHCI_RESET_ON_RESUME)
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
+index 3c3bab33e03a5..49d6b2388b874 100644
+--- a/drivers/usb/typec/class.c
++++ b/drivers/usb/typec/class.c
+@@ -267,7 +267,7 @@ static void typec_altmode_put_partner(struct altmode *altmode)
+ if (!partner)
+ return;
+
+- adev = &partner->adev;
++ adev = &altmode->adev;
+
+ if (is_typec_plug(adev->dev.parent)) {
+ struct typec_plug *plug = to_typec_plug(adev->dev.parent);
+@@ -497,7 +497,8 @@ static void typec_altmode_release(struct device *dev)
+ {
+ struct altmode *alt = to_altmode(to_typec_altmode(dev));
+
+- typec_altmode_put_partner(alt);
++ if (!is_typec_port(dev->parent))
++ typec_altmode_put_partner(alt);
+
+ altmode_id_remove(alt->adev.dev.parent, alt->id);
+ kfree(alt);
+diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+index bf99654371b35..2b7e796c48897 100644
+--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+@@ -2508,13 +2508,18 @@ static int setup_cvq_vring(struct mlx5_vdpa_dev *mvdev)
+ struct mlx5_control_vq *cvq = &mvdev->cvq;
+ int err = 0;
+
+- if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))
++ if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)) {
++ u16 idx = cvq->vring.last_avail_idx;
++
+ err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features,
+ MLX5_CVQ_MAX_ENT, false,
+ (struct vring_desc *)(uintptr_t)cvq->desc_addr,
+ (struct vring_avail *)(uintptr_t)cvq->driver_addr,
+ (struct vring_used *)(uintptr_t)cvq->device_addr);
+
++ if (!err)
++ cvq->vring.last_avail_idx = cvq->vring.last_used_idx = idx;
++ }
+ return err;
+ }
+
+diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
+index 2c6078a6b8ecb..58ca7c936393c 100644
+--- a/fs/nilfs2/sufile.c
++++ b/fs/nilfs2/sufile.c
+@@ -501,15 +501,38 @@ int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
+
+ down_write(&NILFS_MDT(sufile)->mi_sem);
+ ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
+- if (!ret) {
+- mark_buffer_dirty(bh);
+- nilfs_mdt_mark_dirty(sufile);
+- kaddr = kmap_atomic(bh->b_page);
+- su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
++ if (ret)
++ goto out_sem;
++
++ kaddr = kmap_atomic(bh->b_page);
++ su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
++ if (unlikely(nilfs_segment_usage_error(su))) {
++ struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
++
++ kunmap_atomic(kaddr);
++ brelse(bh);
++ if (nilfs_segment_is_active(nilfs, segnum)) {
++ nilfs_error(sufile->i_sb,
++ "active segment %llu is erroneous",
++ (unsigned long long)segnum);
++ } else {
++ /*
++ * Segments marked erroneous are never allocated by
++ * nilfs_sufile_alloc(); only active segments, ie,
++ * the segments indexed by ns_segnum or ns_nextnum,
++ * can be erroneous here.
++ */
++ WARN_ON_ONCE(1);
++ }
++ ret = -EIO;
++ } else {
+ nilfs_segment_usage_set_dirty(su);
+ kunmap_atomic(kaddr);
++ mark_buffer_dirty(bh);
++ nilfs_mdt_mark_dirty(sufile);
+ brelse(bh);
+ }
++out_sem:
+ up_write(&NILFS_MDT(sufile)->mi_sem);
+ return ret;
+ }
+@@ -536,9 +559,14 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
+
+ kaddr = kmap_atomic(bh->b_page);
+ su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
+- WARN_ON(nilfs_segment_usage_error(su));
+- if (modtime)
++ if (modtime) {
++ /*
++ * Check segusage error and set su_lastmod only when updating
++ * this entry with a valid timestamp, not for cancellation.
++ */
++ WARN_ON_ONCE(nilfs_segment_usage_error(su));
+ su->su_lastmod = cpu_to_le64(modtime);
++ }
+ su->su_nblocks = cpu_to_le32(nblocks);
+ kunmap_atomic(kaddr);
+
+diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
+index 0f0667957c810..71400496ed365 100644
+--- a/fs/nilfs2/the_nilfs.c
++++ b/fs/nilfs2/the_nilfs.c
+@@ -716,7 +716,11 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
+ goto failed_sbh;
+ }
+ nilfs_release_super_block(nilfs);
+- sb_set_blocksize(sb, blocksize);
++ if (!sb_set_blocksize(sb, blocksize)) {
++ nilfs_err(sb, "bad blocksize %d", blocksize);
++ err = -EINVAL;
++ goto out;
++ }
+
+ err = nilfs_load_super_block(nilfs, sb, blocksize, &sbp);
+ if (err)
+diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
+index 33ea1440f4b06..2e15b182e59fc 100644
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -1191,32 +1191,103 @@ const struct inode_operations cifs_symlink_inode_ops = {
+ .listxattr = cifs_listxattr,
+ };
+
++/*
++ * Advance the EOF marker to after the source range.
++ */
++static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
++ struct cifs_tcon *src_tcon,
++ unsigned int xid, loff_t src_end)
++{
++ struct cifsFileInfo *writeable_srcfile;
++ int rc = -EINVAL;
++
++ writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
++ if (writeable_srcfile) {
++ if (src_tcon->ses->server->ops->set_file_size)
++ rc = src_tcon->ses->server->ops->set_file_size(
++ xid, src_tcon, writeable_srcfile,
++ src_inode->i_size, true /* no need to set sparse */);
++ else
++ rc = -ENOSYS;
++ cifsFileInfo_put(writeable_srcfile);
++ cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
++ }
++
++ if (rc < 0)
++ goto set_failed;
++
++ netfs_resize_file(&src_cifsi->netfs, src_end);
++ fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
++ return 0;
++
++set_failed:
++ return filemap_write_and_wait(src_inode->i_mapping);
++}
++
++/*
++ * Flush out either the folio that overlaps the beginning of a range in which
++ * pos resides or the folio that overlaps the end of a range unless that folio
++ * is entirely within the range we're going to invalidate. We extend the flush
++ * bounds to encompass the folio.
++ */
++static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
++ bool first)
++{
++ struct folio *folio;
++ unsigned long long fpos, fend;
++ pgoff_t index = pos / PAGE_SIZE;
++ size_t size;
++ int rc = 0;
++
++ folio = filemap_get_folio(inode->i_mapping, index);
++ if (IS_ERR(folio))
++ return 0;
++
++ size = folio_size(folio);
++ fpos = folio_pos(folio);
++ fend = fpos + size - 1;
++ *_fstart = min_t(unsigned long long, *_fstart, fpos);
++ *_fend = max_t(unsigned long long, *_fend, fend);
++ if ((first && pos == fpos) || (!first && pos == fend))
++ goto out;
++
++ rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
++out:
++ folio_put(folio);
++ return rc;
++}
++
+ static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
+ struct file *dst_file, loff_t destoff, loff_t len,
+ unsigned int remap_flags)
+ {
+ struct inode *src_inode = file_inode(src_file);
+ struct inode *target_inode = file_inode(dst_file);
++ struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
++ struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
+ struct cifsFileInfo *smb_file_src = src_file->private_data;
+- struct cifsFileInfo *smb_file_target;
+- struct cifs_tcon *target_tcon;
++ struct cifsFileInfo *smb_file_target = dst_file->private_data;
++ struct cifs_tcon *target_tcon, *src_tcon;
++ unsigned long long destend, fstart, fend, new_size;
+ unsigned int xid;
+ int rc;
+
+- if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
++ if (remap_flags & REMAP_FILE_DEDUP)
++ return -EOPNOTSUPP;
++ if (remap_flags & ~REMAP_FILE_ADVISORY)
+ return -EINVAL;
+
+ cifs_dbg(FYI, "clone range\n");
+
+ xid = get_xid();
+
+- if (!src_file->private_data || !dst_file->private_data) {
++ if (!smb_file_src || !smb_file_target) {
+ rc = -EBADF;
+ cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
+ goto out;
+ }
+
+- smb_file_target = dst_file->private_data;
++ src_tcon = tlink_tcon(smb_file_src->tlink);
+ target_tcon = tlink_tcon(smb_file_target->tlink);
+
+ /*
+@@ -1229,20 +1300,63 @@ static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
+ if (len == 0)
+ len = src_inode->i_size - off;
+
+- cifs_dbg(FYI, "about to flush pages\n");
+- /* should we flush first and last page first */
+- truncate_inode_pages_range(&target_inode->i_data, destoff,
+- PAGE_ALIGN(destoff + len)-1);
++ cifs_dbg(FYI, "clone range\n");
+
+- if (target_tcon->ses->server->ops->duplicate_extents)
++ /* Flush the source buffer */
++ rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
++ off + len - 1);
++ if (rc)
++ goto unlock;
++
++ /* The server-side copy will fail if the source crosses the EOF marker.
++ * Advance the EOF marker after the flush above to the end of the range
++ * if it's short of that.
++ */
++ if (src_cifsi->netfs.remote_i_size < off + len) {
++ rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
++ if (rc < 0)
++ goto unlock;
++ }
++
++ new_size = destoff + len;
++ destend = destoff + len - 1;
++
++ /* Flush the folios at either end of the destination range to prevent
++ * accidental loss of dirty data outside of the range.
++ */
++ fstart = destoff;
++ fend = destend;
++
++ rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
++ if (rc)
++ goto unlock;
++ rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
++ if (rc)
++ goto unlock;
++
++ /* Discard all the folios that overlap the destination region. */
++ cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
++ truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
++
++ fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
++ i_size_read(target_inode), 0);
++
++ rc = -EOPNOTSUPP;
++ if (target_tcon->ses->server->ops->duplicate_extents) {
+ rc = target_tcon->ses->server->ops->duplicate_extents(xid,
+ smb_file_src, smb_file_target, off, len, destoff);
+- else
+- rc = -EOPNOTSUPP;
++ if (rc == 0 && new_size > i_size_read(target_inode)) {
++ truncate_setsize(target_inode, new_size);
++ netfs_resize_file(&target_cifsi->netfs, new_size);
++ fscache_resize_cookie(cifs_inode_cookie(target_inode),
++ new_size);
++ }
++ }
+
+ /* force revalidate of size and timestamps of target file now
+ that target is updated on the server */
+ CIFS_I(target_inode)->time = 0;
++unlock:
+ /* although unlocking in the reverse order from locking is not
+ strictly necessary here it is a little cleaner to be consistent */
+ unlock_two_nondirectories(src_inode, target_inode);
+@@ -1258,10 +1372,12 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
+ {
+ struct inode *src_inode = file_inode(src_file);
+ struct inode *target_inode = file_inode(dst_file);
++ struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
+ struct cifsFileInfo *smb_file_src;
+ struct cifsFileInfo *smb_file_target;
+ struct cifs_tcon *src_tcon;
+ struct cifs_tcon *target_tcon;
++ unsigned long long destend, fstart, fend;
+ ssize_t rc;
+
+ cifs_dbg(FYI, "copychunk range\n");
+@@ -1301,13 +1417,41 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
+ if (rc)
+ goto unlock;
+
+- /* should we flush first and last page first */
+- truncate_inode_pages(&target_inode->i_data, 0);
++ /* The server-side copy will fail if the source crosses the EOF marker.
++ * Advance the EOF marker after the flush above to the end of the range
++ * if it's short of that.
++ */
++ if (src_cifsi->server_eof < off + len) {
++ rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
++ if (rc < 0)
++ goto unlock;
++ }
++
++ destend = destoff + len - 1;
++
++ /* Flush the folios at either end of the destination range to prevent
++ * accidental loss of dirty data outside of the range.
++ */
++ fstart = destoff;
++ fend = destend;
++
++ rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
++ if (rc)
++ goto unlock;
++ rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
++ if (rc)
++ goto unlock;
++
++ /* Discard all the folios that overlap the destination region. */
++ truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
+
+ rc = file_modified(dst_file);
+- if (!rc)
++ if (!rc) {
+ rc = target_tcon->ses->server->ops->copychunk_range(xid,
+ smb_file_src, smb_file_target, off, len, destoff);
++ if (rc > 0 && destoff + rc > i_size_read(target_inode))
++ truncate_setsize(target_inode, destoff + rc);
++ }
+
+ file_accessed(src_file);
+
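Editor's note: cifs_flush_folio() above rounds the flush/invalidate bounds out to whole folios and skips the writeback when the range edge already sits on a folio boundary. A small arithmetic sketch of that bounds logic, assuming a fixed 4 KiB folio size in place of the page-cache API (the real helper also returns early when no folio is cached at pos):

#include <stdio.h>

#define FOLIO_SIZE 4096ULL /* assumed fixed folio size, for illustration */

/* Extend [*fstart, *fend] to cover the folio containing pos and report
 * whether that folio must be flushed, i.e. whether pos cuts the folio
 * rather than landing exactly on the edge about to be invalidated.
 */
static int flush_bounds(unsigned long long pos, int first,
			unsigned long long *fstart, unsigned long long *fend)
{
	unsigned long long fpos = pos / FOLIO_SIZE * FOLIO_SIZE;
	unsigned long long fe = fpos + FOLIO_SIZE - 1;

	if (fpos < *fstart)
		*fstart = fpos;
	if (fe > *fend)
		*fend = fe;
	/* No flush needed if the range edge coincides with the folio edge. */
	if ((first && pos == fpos) || (!first && pos == fe))
		return 0;
	return 1; /* caller would filemap_write_and_wait_range(fpos, fe) */
}

int main(void)
{
	unsigned long long destoff = 1000, len = 10000;
	unsigned long long destend = destoff + len - 1;
	unsigned long long fstart = destoff, fend = destend;
	int f1 = flush_bounds(destoff, 1, &fstart, &fend);
	int f2 = flush_bounds(destend, 0, &fstart, &fend);

	printf("invalidate %llu-%llu, flush head=%d tail=%d\n",
	       fstart, fend, f1, f2);	/* 0-12287, flush head=1 tail=1 */
	return 0;
}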
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index e628848a1df93..6ef3c00de5ca1 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -2834,6 +2834,8 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
+ usleep_range(512, 2048);
+ } while (++retry_count < 5);
+
++ if (!rc && !dfs_rsp)
++ rc = -EIO;
+ if (rc) {
+ if (!is_retryable_error(rc) && rc != -ENOENT && rc != -EOPNOTSUPP)
+ cifs_tcon_dbg(VFS, "%s: ioctl error: rc=%d\n", __func__, rc);
+diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
+index c7e0d80dbf6a5..67575bc8a7e29 100644
+--- a/include/linux/cpuhotplug.h
++++ b/include/linux/cpuhotplug.h
+@@ -196,6 +196,7 @@ enum cpuhp_state {
+ CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
+ CPUHP_AP_ARM64_ISNDEP_STARTING,
+ CPUHP_AP_SMPCFD_DYING,
++ CPUHP_AP_HRTIMERS_DYING,
+ CPUHP_AP_X86_TBOOT_DYING,
+ CPUHP_AP_ARM_CACHE_B15_RAC_DYING,
+ CPUHP_AP_ONLINE,
+diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
+index 0ee140176f102..f2044d5a652b5 100644
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -531,9 +531,9 @@ extern void sysrq_timer_list_show(void);
+
+ int hrtimers_prepare_cpu(unsigned int cpu);
+ #ifdef CONFIG_HOTPLUG_CPU
+-int hrtimers_dead_cpu(unsigned int cpu);
++int hrtimers_cpu_dying(unsigned int cpu);
+ #else
+-#define hrtimers_dead_cpu NULL
++#define hrtimers_cpu_dying NULL
+ #endif
+
+ #endif
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index e46f6b49eb389..1c6f35ba1604f 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -880,10 +880,7 @@ static inline bool hugepage_migration_supported(struct hstate *h)
+ return arch_hugetlb_migration_supported(h);
+ }
+
+-static inline bool __vma_private_lock(struct vm_area_struct *vma)
+-{
+- return (!(vma->vm_flags & VM_MAYSHARE)) && vma->vm_private_data;
+-}
++bool __vma_private_lock(struct vm_area_struct *vma);
+
+ /*
+ * Movability check is different as compared to migration check.
+diff --git a/include/linux/iommu.h b/include/linux/iommu.h
+index 3c9da1f8979e3..9d87090953bcc 100644
+--- a/include/linux/iommu.h
++++ b/include/linux/iommu.h
+@@ -657,6 +657,7 @@ static inline void dev_iommu_priv_set(struct device *dev, void *priv)
+ dev->iommu->priv = priv;
+ }
+
++extern struct mutex iommu_probe_device_lock;
+ int iommu_probe_device(struct device *dev);
+ void iommu_release_device(struct device *dev);
+
+diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
+index 85a64cb95d755..8de5d51a0b5e7 100644
+--- a/include/linux/kprobes.h
++++ b/include/linux/kprobes.h
+@@ -140,7 +140,7 @@ static inline bool kprobe_ftrace(struct kprobe *p)
+ *
+ */
+ struct kretprobe_holder {
+- struct kretprobe *rp;
++ struct kretprobe __rcu *rp;
+ refcount_t ref;
+ };
+
+@@ -202,10 +202,8 @@ extern int arch_trampoline_kprobe(struct kprobe *p);
+ #ifdef CONFIG_KRETPROBE_ON_RETHOOK
+ static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri)
+ {
+- RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(),
+- "Kretprobe is accessed from instance under preemptive context");
+-
+- return (struct kretprobe *)READ_ONCE(ri->node.rethook->data);
++ /* rethook::data is an immutable field, so it can be accessed freely. */
++ return (struct kretprobe *)ri->node.rethook->data;
+ }
+ static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri)
+ {
+@@ -250,10 +248,7 @@ unsigned long kretprobe_trampoline_handler(struct pt_regs *regs,
+
+ static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri)
+ {
+- RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(),
+- "Kretprobe is accessed from instance under preemptive context");
+-
+- return READ_ONCE(ri->rph->rp);
++ return rcu_dereference_check(ri->rph->rp, rcu_read_lock_any_held());
+ }
+
+ static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri)
+diff --git a/include/linux/rethook.h b/include/linux/rethook.h
+index bdbe6717f45a2..a00963f33bc14 100644
+--- a/include/linux/rethook.h
++++ b/include/linux/rethook.h
+@@ -29,7 +29,12 @@ typedef void (*rethook_handler_t) (struct rethook_node *, void *, struct pt_regs
+ */
+ struct rethook {
+ void *data;
+- rethook_handler_t handler;
++ /*
++ * To avoid sparse warnings, this uses a raw function pointer with
++ * __rcu, instead of rethook_handler_t. But this must be the same as
++ * rethook_handler_t.
++ */
++ void (__rcu *handler) (struct rethook_node *, void *, struct pt_regs *);
+ struct freelist_head pool;
+ refcount_t ref;
+ struct rcu_head rcu;
+diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
+index d82ff9fa1a6e8..9f4a4f70270df 100644
+--- a/include/linux/stmmac.h
++++ b/include/linux/stmmac.h
+@@ -172,6 +172,7 @@ struct stmmac_fpe_cfg {
+ bool hs_enable; /* FPE handshake enable */
+ enum stmmac_fpe_state lp_fpe_state; /* Link Partner FPE state */
+ enum stmmac_fpe_state lo_fpe_state; /* Local station FPE state */
++ u32 fpe_csr; /* MAC_FPE_CTRL_STS reg cache */
+ };
+
+ struct stmmac_safety_feature_cfg {
+diff --git a/include/net/genetlink.h b/include/net/genetlink.h
+index 9f97f73615b69..b9e5a22ae3ff9 100644
+--- a/include/net/genetlink.h
++++ b/include/net/genetlink.h
+@@ -12,10 +12,12 @@
+ * struct genl_multicast_group - generic netlink multicast group
+ * @name: name of the multicast group, names are per-family
+ * @flags: GENL_* flags (%GENL_ADMIN_PERM or %GENL_UNS_ADMIN_PERM)
++ * @cap_sys_admin: whether %CAP_SYS_ADMIN is required for binding
+ */
+ struct genl_multicast_group {
+ char name[GENL_NAMSIZ];
+ u8 flags;
++ u8 cap_sys_admin:1;
+ };
+
+ struct genl_ops;
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 19646fdec23dc..c3d56b337f358 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1460,17 +1460,22 @@ static inline int tcp_full_space(const struct sock *sk)
+ return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
+ }
+
+-static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
++static inline void __tcp_adjust_rcv_ssthresh(struct sock *sk, u32 new_ssthresh)
+ {
+ int unused_mem = sk_unused_reserved_mem(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+- tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
++ tp->rcv_ssthresh = min(tp->rcv_ssthresh, new_ssthresh);
+ if (unused_mem)
+ tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
+ tcp_win_from_space(sk, unused_mem));
+ }
+
++static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
++{
++ __tcp_adjust_rcv_ssthresh(sk, 4U * tcp_sk(sk)->advmss);
++}
++
+ void tcp_cleanup_rbuf(struct sock *sk, int copied);
+ void __tcp_cleanup_rbuf(struct sock *sk, int copied);
+
+diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
+index 92a673cd9b4fd..77b83ea62dd69 100644
+--- a/include/rdma/ib_umem.h
++++ b/include/rdma/ib_umem.h
+@@ -78,6 +78,13 @@ static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
+ {
+ __rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
+ umem->sgt_append.sgt.nents, pgsz);
++ biter->__sg_advance = ib_umem_offset(umem) & ~(pgsz - 1);
++ biter->__sg_numblocks = ib_umem_num_dma_blocks(umem, pgsz);
++}
++
++static inline bool __rdma_umem_block_iter_next(struct ib_block_iter *biter)
++{
++ return __rdma_block_iter_next(biter) && biter->__sg_numblocks--;
+ }
+
+ /**
+@@ -93,7 +100,7 @@ static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
+ */
+ #define rdma_umem_for_each_dma_block(umem, biter, pgsz) \
+ for (__rdma_umem_block_iter_start(biter, umem, pgsz); \
+- __rdma_block_iter_next(biter);)
++ __rdma_umem_block_iter_next(biter);)
+
+ #ifdef CONFIG_INFINIBAND_USER_MEM
+
+diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
+index 975d6e9efbcb4..5582509003264 100644
+--- a/include/rdma/ib_verbs.h
++++ b/include/rdma/ib_verbs.h
+@@ -2835,6 +2835,7 @@ struct ib_block_iter {
+ /* internal states */
+ struct scatterlist *__sg; /* sg holding the current aligned block */
+ dma_addr_t __dma_addr; /* unaligned DMA address of this block */
++ size_t __sg_numblocks; /* ib_umem_num_dma_blocks() */
+ unsigned int __sg_nents; /* number of SG entries */
+ unsigned int __sg_advance; /* number of bytes to advance in sg in next step */
+ unsigned int __pg_bit; /* alignment of current block */
+diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
+index 466fd3f4447c2..af8f4c304d272 100644
+--- a/include/uapi/linux/netfilter/nf_tables.h
++++ b/include/uapi/linux/netfilter/nf_tables.h
+@@ -816,12 +816,14 @@ enum nft_exthdr_flags {
+ * @NFT_EXTHDR_OP_TCP: match against tcp options
+ * @NFT_EXTHDR_OP_IPV4: match against ipv4 options
+ * @NFT_EXTHDR_OP_SCTP: match against sctp chunks
++ * @NFT_EXTHDR_OP_DCCP: match against dccp options
+ */
+ enum nft_exthdr_op {
+ NFT_EXTHDR_OP_IPV6,
+ NFT_EXTHDR_OP_TCPOPT,
+ NFT_EXTHDR_OP_IPV4,
+ NFT_EXTHDR_OP_SCTP,
++ NFT_EXTHDR_OP_DCCP,
+ __NFT_EXTHDR_OP_MAX
+ };
+ #define NFT_EXTHDR_OP_MAX (__NFT_EXTHDR_OP_MAX - 1)
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index f413ebed81ab3..35894955b4549 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -1377,6 +1377,7 @@ static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
+ if (!(ctx->flags & IORING_SETUP_IOPOLL))
+ return;
+
++ percpu_ref_get(&ctx->refs);
+ mutex_lock(&ctx->uring_lock);
+ while (!wq_list_empty(&ctx->iopoll_list)) {
+ /* let it sleep and repeat later if can't complete a request */
+@@ -1394,6 +1395,7 @@ static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
+ }
+ }
+ mutex_unlock(&ctx->uring_lock);
++ percpu_ref_put(&ctx->refs);
+ }
+
+ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
+@@ -2800,12 +2802,7 @@ static __cold void io_ring_exit_work(struct work_struct *work)
+ init_completion(&exit.completion);
+ init_task_work(&exit.task_work, io_tctx_exit_cb);
+ exit.ctx = ctx;
+- /*
+- * Some may use context even when all refs and requests have been put,
+- * and they are free to do so while still holding uring_lock or
+- * completion_lock, see io_req_task_submit(). Apart from other work,
+- * this lock/unlock section also waits them to finish.
+- */
++
+ mutex_lock(&ctx->uring_lock);
+ while (!list_empty(&ctx->tctx_list)) {
+ WARN_ON_ONCE(time_after(jiffies, timeout));
+diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
+index d60c758326b42..acaf8dad05401 100644
+--- a/io_uring/rsrc.h
++++ b/io_uring/rsrc.h
+@@ -79,17 +79,10 @@ int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
+
+ int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file);
+
+-#if defined(CONFIG_UNIX)
+-static inline bool io_file_need_scm(struct file *filp)
+-{
+- return !!unix_get_socket(filp);
+-}
+-#else
+ static inline bool io_file_need_scm(struct file *filp)
+ {
+ return false;
+ }
+-#endif
+
+ static inline int io_scm_file_account(struct io_ring_ctx *ctx,
+ struct file *file)
+diff --git a/kernel/cgroup/legacy_freezer.c b/kernel/cgroup/legacy_freezer.c
+index 122dacb3a4439..66d1708042a72 100644
+--- a/kernel/cgroup/legacy_freezer.c
++++ b/kernel/cgroup/legacy_freezer.c
+@@ -66,9 +66,15 @@ static struct freezer *parent_freezer(struct freezer *freezer)
+ bool cgroup_freezing(struct task_struct *task)
+ {
+ bool ret;
++ unsigned int state;
+
+ rcu_read_lock();
+- ret = task_freezer(task)->state & CGROUP_FREEZING;
++ /* Check if the cgroup is still FREEZING, but not FROZEN. The extra
++ * !FROZEN check is required, because the FREEZING bit is not cleared
++ * when the state FROZEN is reached.
++ */
++ state = task_freezer(task)->state;
++ ret = (state & CGROUP_FREEZING) && !(state & CGROUP_FROZEN);
+ rcu_read_unlock();
+
+ return ret;
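Editor's note: the legacy-freezer fix accounts for the FREEZING bits staying set after FROZEN is reached, so "still freezing" must test both. A trivial runnable demonstration (the flag values are chosen to mirror the legacy freezer's, but this is a model, not the kernel code):

#include <stdio.h>

#define CGROUP_FREEZING_SELF	(1 << 1)
#define CGROUP_FREEZING_PARENT	(1 << 2)
#define CGROUP_FROZEN		(1 << 3)
#define CGROUP_FREEZING	(CGROUP_FREEZING_SELF | CGROUP_FREEZING_PARENT)

/* The FREEZING bits are not cleared once FROZEN is set, so a task in a
 * fully frozen cgroup must not be reported as "freezing".
 */
static int cgroup_freezing(unsigned int state)
{
	return (state & CGROUP_FREEZING) && !(state & CGROUP_FROZEN);
}

int main(void)
{
	printf("mid-freeze: %d\n", cgroup_freezing(CGROUP_FREEZING_SELF));
	printf("frozen:     %d\n",
	       cgroup_freezing(CGROUP_FREEZING_SELF | CGROUP_FROZEN));
	return 0;	/* mid-freeze: 1, frozen: 0 */
}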
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 0e4d362e90825..551468d9c5a85 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -1733,7 +1733,7 @@ static struct cpuhp_step cpuhp_hp_states[] = {
+ [CPUHP_HRTIMERS_PREPARE] = {
+ .name = "hrtimers:prepare",
+ .startup.single = hrtimers_prepare_cpu,
+- .teardown.single = hrtimers_dead_cpu,
++ .teardown.single = NULL,
+ },
+ [CPUHP_SMPCFD_PREPARE] = {
+ .name = "smpcfd:prepare",
+@@ -1800,6 +1800,12 @@ static struct cpuhp_step cpuhp_hp_states[] = {
+ .startup.single = NULL,
+ .teardown.single = smpcfd_dying_cpu,
+ },
++ [CPUHP_AP_HRTIMERS_DYING] = {
++ .name = "hrtimers:dying",
++ .startup.single = NULL,
++ .teardown.single = hrtimers_cpu_dying,
++ },
++
+ /* Entry state on starting. Interrupts enabled from here on. Transient
+ * state for synchronization */
+ [CPUHP_AP_ONLINE] = {
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 8f2b9d8b9150e..0193243f65e5c 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -1812,31 +1812,34 @@ static inline void perf_event__state_init(struct perf_event *event)
+ PERF_EVENT_STATE_INACTIVE;
+ }
+
+-static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
++static int __perf_event_read_size(u64 read_format, int nr_siblings)
+ {
+ int entry = sizeof(u64); /* value */
+ int size = 0;
+ int nr = 1;
+
+- if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
++ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ size += sizeof(u64);
+
+- if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
++ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ size += sizeof(u64);
+
+- if (event->attr.read_format & PERF_FORMAT_ID)
++ if (read_format & PERF_FORMAT_ID)
+ entry += sizeof(u64);
+
+- if (event->attr.read_format & PERF_FORMAT_LOST)
++ if (read_format & PERF_FORMAT_LOST)
+ entry += sizeof(u64);
+
+- if (event->attr.read_format & PERF_FORMAT_GROUP) {
++ if (read_format & PERF_FORMAT_GROUP) {
+ nr += nr_siblings;
+ size += sizeof(u64);
+ }
+
+- size += entry * nr;
+- event->read_size = size;
++ /*
++ * Since perf_event_validate_size() limits this to 16k and inhibits
++ * adding more siblings, this will never overflow.
++ */
++ return size + nr * entry;
+ }
+
+ static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
+@@ -1886,8 +1889,9 @@ static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
+ */
+ static void perf_event__header_size(struct perf_event *event)
+ {
+- __perf_event_read_size(event,
+- event->group_leader->nr_siblings);
++ event->read_size =
++ __perf_event_read_size(event->attr.read_format,
++ event->group_leader->nr_siblings);
+ __perf_event_header_size(event, event->attr.sample_type);
+ }
+
+@@ -1918,24 +1922,35 @@ static void perf_event__id_header_size(struct perf_event *event)
+ event->id_header_size = size;
+ }
+
++/*
++ * Check that adding an event to the group does not result in anybody
++ * overflowing the 64k event limit imposed by the output buffer.
++ *
++ * Specifically, check that the read_size for the event does not exceed 16k,
++ * read_size being the one term that grows with groups size. Since read_size
++ * depends on per-event read_format, also (re)check the existing events.
++ *
++ * This leaves 48k for the constant size fields and things like callchains,
++ * branch stacks and register sets.
++ */
+ static bool perf_event_validate_size(struct perf_event *event)
+ {
+- /*
+- * The values computed here will be over-written when we actually
+- * attach the event.
+- */
+- __perf_event_read_size(event, event->group_leader->nr_siblings + 1);
+- __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
+- perf_event__id_header_size(event);
++ struct perf_event *sibling, *group_leader = event->group_leader;
+
+- /*
+- * Sum the lot; should not exceed the 64k limit we have on records.
+- * Conservative limit to allow for callchains and other variable fields.
+- */
+- if (event->read_size + event->header_size +
+- event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
++ if (__perf_event_read_size(event->attr.read_format,
++ group_leader->nr_siblings + 1) > 16*1024)
+ return false;
+
++ if (__perf_event_read_size(group_leader->attr.read_format,
++ group_leader->nr_siblings + 1) > 16*1024)
++ return false;
++
++ for_each_sibling_event(sibling, group_leader) {
++ if (__perf_event_read_size(sibling->attr.read_format,
++ group_leader->nr_siblings + 1) > 16*1024)
++ return false;
++ }
++
+ return true;
+ }
+
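Editor's note: __perf_event_read_size() now works from an explicit read_format and sibling count, and perf_event_validate_size() checks the new event, the leader, and every sibling against the 16k read_size cap. A userspace sketch of the size computation (the flag values mirror the perf UAPI read_format bits, but this is an illustration, not the kernel code):

#include <stdio.h>
#include <stdint.h>

#define FMT_TOTAL_TIME_ENABLED	(1U << 0)
#define FMT_TOTAL_TIME_RUNNING	(1U << 1)
#define FMT_ID			(1U << 2)
#define FMT_GROUP		(1U << 3)
#define FMT_LOST		(1U << 4)

/* Mirrors __perf_event_read_size(): one-off terms plus per-entry terms. */
static int read_size(uint64_t read_format, int nr_siblings)
{
	int entry = sizeof(uint64_t);	/* value */
	int size = 0;
	int nr = 1;

	if (read_format & FMT_TOTAL_TIME_ENABLED)
		size += sizeof(uint64_t);
	if (read_format & FMT_TOTAL_TIME_RUNNING)
		size += sizeof(uint64_t);
	if (read_format & FMT_ID)
		entry += sizeof(uint64_t);
	if (read_format & FMT_LOST)
		entry += sizeof(uint64_t);
	if (read_format & FMT_GROUP) {
		nr += nr_siblings;
		size += sizeof(uint64_t);
	}
	return size + nr * entry;
}

int main(void)
{
	uint64_t fmt = FMT_GROUP | FMT_ID | FMT_LOST;
	int siblings;

	/* With GROUP|ID|LOST each entry is 24 bytes, so the 16k cap is
	 * reached at around 680 siblings; validation would then refuse
	 * to attach further group members.
	 */
	for (siblings = 0; read_size(fmt, siblings + 1) <= 16 * 1024; siblings++)
		;
	printf("group becomes invalid at %d siblings\n", siblings);
	return 0;
}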
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 3da9726232ff9..dbfddfa86c14e 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -2253,7 +2253,7 @@ int register_kretprobe(struct kretprobe *rp)
+ if (!rp->rph)
+ return -ENOMEM;
+
+- rp->rph->rp = rp;
++ rcu_assign_pointer(rp->rph->rp, rp);
+ for (i = 0; i < rp->maxactive; i++) {
+ inst = kzalloc(sizeof(struct kretprobe_instance) +
+ rp->data_size, GFP_KERNEL);
+@@ -2314,7 +2314,7 @@ void unregister_kretprobes(struct kretprobe **rps, int num)
+ #ifdef CONFIG_KRETPROBE_ON_RETHOOK
+ rethook_free(rps[i]->rh);
+ #else
+- rps[i]->rph->rp = NULL;
++ rcu_assign_pointer(rps[i]->rph->rp, NULL);
+ #endif
+ }
+ mutex_unlock(&kprobe_mutex);
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index e4f0e3b0c4f4f..5561dabc9b225 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -2216,29 +2216,22 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
+ }
+ }
+
+-int hrtimers_dead_cpu(unsigned int scpu)
++int hrtimers_cpu_dying(unsigned int dying_cpu)
+ {
+ struct hrtimer_cpu_base *old_base, *new_base;
+- int i;
++ int i, ncpu = cpumask_first(cpu_active_mask);
+
+- BUG_ON(cpu_online(scpu));
+- tick_cancel_sched_timer(scpu);
++ tick_cancel_sched_timer(dying_cpu);
++
++ old_base = this_cpu_ptr(&hrtimer_bases);
++ new_base = &per_cpu(hrtimer_bases, ncpu);
+
+- /*
+- * this BH disable ensures that raise_softirq_irqoff() does
+- * not wakeup ksoftirqd (and acquire the pi-lock) while
+- * holding the cpu_base lock
+- */
+- local_bh_disable();
+- local_irq_disable();
+- old_base = &per_cpu(hrtimer_bases, scpu);
+- new_base = this_cpu_ptr(&hrtimer_bases);
+ /*
+ * The caller is globally serialized and nobody else
+ * takes two locks at once, deadlock is not possible.
+ */
+- raw_spin_lock(&new_base->lock);
+- raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
++ raw_spin_lock(&old_base->lock);
++ raw_spin_lock_nested(&new_base->lock, SINGLE_DEPTH_NESTING);
+
+ for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
+ migrate_hrtimer_list(&old_base->clock_base[i],
+@@ -2249,15 +2242,13 @@ int hrtimers_dead_cpu(unsigned int scpu)
+ * The migration might have changed the first expiring softirq
+ * timer on this CPU. Update it.
+ */
+- hrtimer_update_softirq_timer(new_base, false);
++ __hrtimer_get_next_event(new_base, HRTIMER_ACTIVE_SOFT);
++ /* Tell the other CPU to retrigger the next event */
++ smp_call_function_single(ncpu, retrigger_next_event, NULL, 0);
+
+- raw_spin_unlock(&old_base->lock);
+ raw_spin_unlock(&new_base->lock);
++ raw_spin_unlock(&old_base->lock);
+
+- /* Check, if we got expired work to do */
+- __hrtimer_peek_ahead_timers();
+- local_irq_enable();
+- local_bh_enable();
+ return 0;
+ }
+
+diff --git a/kernel/trace/rethook.c b/kernel/trace/rethook.c
+index 468006cce7cae..3686626b52c57 100644
+--- a/kernel/trace/rethook.c
++++ b/kernel/trace/rethook.c
+@@ -63,7 +63,7 @@ static void rethook_free_rcu(struct rcu_head *head)
+ */
+ void rethook_stop(struct rethook *rh)
+ {
+- WRITE_ONCE(rh->handler, NULL);
++ rcu_assign_pointer(rh->handler, NULL);
+ }
+
+ /**
+@@ -78,11 +78,17 @@ void rethook_stop(struct rethook *rh)
+ */
+ void rethook_free(struct rethook *rh)
+ {
+- WRITE_ONCE(rh->handler, NULL);
++ rethook_stop(rh);
+
+ call_rcu(&rh->rcu, rethook_free_rcu);
+ }
+
++static inline rethook_handler_t rethook_get_handler(struct rethook *rh)
++{
++ return (rethook_handler_t)rcu_dereference_check(rh->handler,
++ rcu_read_lock_any_held());
++}
++
+ /**
+ * rethook_alloc() - Allocate struct rethook.
+ * @data: a data to pass the @handler when hooking the return.
+@@ -102,7 +108,7 @@ struct rethook *rethook_alloc(void *data, rethook_handler_t handler)
+ }
+
+ rh->data = data;
+- rh->handler = handler;
++ rcu_assign_pointer(rh->handler, handler);
+ rh->pool.head = NULL;
+ refcount_set(&rh->ref, 1);
+
+@@ -142,9 +148,10 @@ static void free_rethook_node_rcu(struct rcu_head *head)
+ */
+ void rethook_recycle(struct rethook_node *node)
+ {
+- lockdep_assert_preemption_disabled();
++ rethook_handler_t handler;
+
+- if (likely(READ_ONCE(node->rethook->handler)))
++ handler = rethook_get_handler(node->rethook);
++ if (likely(handler))
+ freelist_add(&node->freelist, &node->rethook->pool);
+ else
+ call_rcu(&node->rcu, free_rethook_node_rcu);
+@@ -160,11 +167,9 @@ NOKPROBE_SYMBOL(rethook_recycle);
+ */
+ struct rethook_node *rethook_try_get(struct rethook *rh)
+ {
+- rethook_handler_t handler = READ_ONCE(rh->handler);
++ rethook_handler_t handler = rethook_get_handler(rh);
+ struct freelist_node *fn;
+
+- lockdep_assert_preemption_disabled();
+-
+ /* Check whether @rh is going to be freed. */
+ if (unlikely(!handler))
+ return NULL;
+@@ -312,7 +317,7 @@ unsigned long rethook_trampoline_handler(struct pt_regs *regs,
+ rhn = container_of(first, struct rethook_node, llist);
+ if (WARN_ON_ONCE(rhn->frame != frame))
+ break;
+- handler = READ_ONCE(rhn->rethook->handler);
++ handler = rethook_get_handler(rhn->rethook);
+ if (handler)
+ handler(rhn, rhn->rethook->data, regs);
+
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index b7383358c4ea1..c02a4cb879913 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -646,8 +646,8 @@ static inline bool __rb_time_read(rb_time_t *t, u64 *ret, unsigned long *cnt)
+
+ *cnt = rb_time_cnt(top);
+
+- /* If top and bottom counts don't match, this interrupted a write */
+- if (*cnt != rb_time_cnt(bottom))
++ /* If top and msb counts don't match, this interrupted a write */
++ if (*cnt != rb_time_cnt(msb))
+ return false;
+
+ /* The shift to msb will lose its cnt bits */
+@@ -3025,22 +3025,19 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
+ local_read(&bpage->write) & ~RB_WRITE_MASK;
+ unsigned long event_length = rb_event_length(event);
+
++ /*
++ * Force the before_stamp to differ from the write_stamp so that
++ * the next event adds an absolute value and does not rely on the
++ * saved write stamp, which is now going to be bogus.
++ */
++ rb_time_set(&cpu_buffer->before_stamp, 0);
++
+ /* Something came in, can't discard */
+ if (!rb_time_cmpxchg(&cpu_buffer->write_stamp,
+ write_stamp, write_stamp - delta))
+ return 0;
+
+- /*
+- * It's possible that the event time delta is zero
+- * (has the same time stamp as the previous event)
+- * in which case write_stamp and before_stamp could
+- * be the same. In such a case, force before_stamp
+- * to be different than write_stamp. It doesn't
+- * matter what it is, as long as its different.
+- */
+- if (!delta)
+- rb_time_set(&cpu_buffer->before_stamp, 0);
+-
+ /*
+ * If an event were to come in now, it would see that the
+ * write_stamp and the before_stamp are different, and assume
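Editor's note: the __rb_time_read() fix compares the counter bits of the top and msb words, since a write that interrupts the read leaves the parts of the split 64-bit timestamp tagged with different update counters. A loose runnable model of that consistency check, validating every part (the field layout is illustrative, not rb_time_t's exact encoding):

#include <stdio.h>
#include <stdint.h>

/* Illustrative split-word timestamp: each 30-bit part carries a 2-bit
 * update counter in its top bits.
 */
#define CNT_SHIFT 30
#define VAL_MASK ((1UL << CNT_SHIFT) - 1)

static unsigned long part(uint64_t val, int shift, unsigned long cnt)
{
	return ((val >> shift) & VAL_MASK) | (cnt << CNT_SHIFT);
}

/* A read is valid only if every part was written by the same update. */
static int read_parts(unsigned long top, unsigned long bottom,
		      unsigned long msb, uint64_t *ret)
{
	unsigned long cnt = top >> CNT_SHIFT;

	if (cnt != (bottom >> CNT_SHIFT) || cnt != (msb >> CNT_SHIFT))
		return 0;	/* interrupted by a concurrent write */
	*ret = ((uint64_t)(msb & VAL_MASK) << (2 * CNT_SHIFT)) |
	       ((uint64_t)(top & VAL_MASK) << CNT_SHIFT) |
	       (bottom & VAL_MASK);
	return 1;
}

int main(void)
{
	uint64_t ts = 0x123456789abcdefULL, out;
	unsigned long bottom = part(ts, 0, 1), top = part(ts, CNT_SHIFT, 1);
	unsigned long msb_ok = part(ts, 2 * CNT_SHIFT, 1);
	unsigned long msb_stale = part(ts, 2 * CNT_SHIFT, 0);

	printf("consistent: %d\n", read_parts(top, bottom, msb_ok, &out));
	printf("torn:       %d\n", read_parts(top, bottom, msb_stale, &out));
	return 0;	/* consistent: 1, torn: 0 */
}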
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index ddcfc78e93e00..d2db4d6f0f2fd 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2297,13 +2297,7 @@ int is_tracing_stopped(void)
+ return global_trace.stop_count;
+ }
+
+-/**
+- * tracing_start - quick start of the tracer
+- *
+- * If tracing is enabled but was stopped by tracing_stop,
+- * this will start the tracer back up.
+- */
+-void tracing_start(void)
++static void tracing_start_tr(struct trace_array *tr)
+ {
+ struct trace_buffer *buffer;
+ unsigned long flags;
+@@ -2311,119 +2305,83 @@ void tracing_start(void)
+ if (tracing_disabled)
+ return;
+
+- raw_spin_lock_irqsave(&global_trace.start_lock, flags);
+- if (--global_trace.stop_count) {
+- if (global_trace.stop_count < 0) {
++ raw_spin_lock_irqsave(&tr->start_lock, flags);
++ if (--tr->stop_count) {
++ if (WARN_ON_ONCE(tr->stop_count < 0)) {
+ /* Someone screwed up their debugging */
+- WARN_ON_ONCE(1);
+- global_trace.stop_count = 0;
++ tr->stop_count = 0;
+ }
+ goto out;
+ }
+
+ /* Prevent the buffers from switching */
+- arch_spin_lock(&global_trace.max_lock);
++ arch_spin_lock(&tr->max_lock);
+
+- buffer = global_trace.array_buffer.buffer;
++ buffer = tr->array_buffer.buffer;
+ if (buffer)
+ ring_buffer_record_enable(buffer);
+
+ #ifdef CONFIG_TRACER_MAX_TRACE
+- buffer = global_trace.max_buffer.buffer;
++ buffer = tr->max_buffer.buffer;
+ if (buffer)
+ ring_buffer_record_enable(buffer);
+ #endif
+
+- arch_spin_unlock(&global_trace.max_lock);
+-
+- out:
+- raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
+-}
+-
+-static void tracing_start_tr(struct trace_array *tr)
+-{
+- struct trace_buffer *buffer;
+- unsigned long flags;
+-
+- if (tracing_disabled)
+- return;
+-
+- /* If global, we need to also start the max tracer */
+- if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
+- return tracing_start();
+-
+- raw_spin_lock_irqsave(&tr->start_lock, flags);
+-
+- if (--tr->stop_count) {
+- if (tr->stop_count < 0) {
+- /* Someone screwed up their debugging */
+- WARN_ON_ONCE(1);
+- tr->stop_count = 0;
+- }
+- goto out;
+- }
+-
+- buffer = tr->array_buffer.buffer;
+- if (buffer)
+- ring_buffer_record_enable(buffer);
++ arch_spin_unlock(&tr->max_lock);
+
+ out:
+ raw_spin_unlock_irqrestore(&tr->start_lock, flags);
+ }
+
+ /**
+- * tracing_stop - quick stop of the tracer
++ * tracing_start - quick start of the tracer
+ *
+- * Light weight way to stop tracing. Use in conjunction with
+- * tracing_start.
++ * If tracing is enabled but was stopped by tracing_stop,
++ * this will start the tracer back up.
+ */
+-void tracing_stop(void)
++void tracing_start(void)
++{
++ return tracing_start_tr(&global_trace);
++}
++
++static void tracing_stop_tr(struct trace_array *tr)
+ {
+ struct trace_buffer *buffer;
+ unsigned long flags;
+
+- raw_spin_lock_irqsave(&global_trace.start_lock, flags);
+- if (global_trace.stop_count++)
++ raw_spin_lock_irqsave(&tr->start_lock, flags);
++ if (tr->stop_count++)
+ goto out;
+
+ /* Prevent the buffers from switching */
+- arch_spin_lock(&global_trace.max_lock);
++ arch_spin_lock(&tr->max_lock);
+
+- buffer = global_trace.array_buffer.buffer;
++ buffer = tr->array_buffer.buffer;
+ if (buffer)
+ ring_buffer_record_disable(buffer);
+
+ #ifdef CONFIG_TRACER_MAX_TRACE
+- buffer = global_trace.max_buffer.buffer;
++ buffer = tr->max_buffer.buffer;
+ if (buffer)
+ ring_buffer_record_disable(buffer);
+ #endif
+
+- arch_spin_unlock(&global_trace.max_lock);
++ arch_spin_unlock(&tr->max_lock);
+
+ out:
+- raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
++ raw_spin_unlock_irqrestore(&tr->start_lock, flags);
+ }
+
+-static void tracing_stop_tr(struct trace_array *tr)
++/**
++ * tracing_stop - quick stop of the tracer
++ *
++ * Light weight way to stop tracing. Use in conjunction with
++ * tracing_start.
++ */
++void tracing_stop(void)
+ {
+- struct trace_buffer *buffer;
+- unsigned long flags;
+-
+- /* If global, we need to also stop the max tracer */
+- if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
+- return tracing_stop();
+-
+- raw_spin_lock_irqsave(&tr->start_lock, flags);
+- if (tr->stop_count++)
+- goto out;
+-
+- buffer = tr->array_buffer.buffer;
+- if (buffer)
+- ring_buffer_record_disable(buffer);
+-
+- out:
+- raw_spin_unlock_irqrestore(&tr->start_lock, flags);
++ return tracing_stop_tr(&global_trace);
+ }
+
+ static int trace_save_cmdline(struct task_struct *tsk)
+@@ -2707,8 +2665,11 @@ void trace_buffered_event_enable(void)
+ for_each_tracing_cpu(cpu) {
+ page = alloc_pages_node(cpu_to_node(cpu),
+ GFP_KERNEL | __GFP_NORETRY, 0);
+- if (!page)
+- goto failed;
++ /* This is just an optimization and can handle failures */
++ if (!page) {
++ pr_err("Failed to allocate event buffer\n");
++ break;
++ }
+
+ event = page_address(page);
+ memset(event, 0, sizeof(*event));
+@@ -2722,10 +2683,6 @@ void trace_buffered_event_enable(void)
+ WARN_ON_ONCE(1);
+ preempt_enable();
+ }
+-
+- return;
+- failed:
+- trace_buffered_event_disable();
+ }
+
+ static void enable_trace_buffered_event(void *data)
+@@ -2760,11 +2717,9 @@ void trace_buffered_event_disable(void)
+ if (--trace_buffered_event_ref)
+ return;
+
+- preempt_disable();
+ /* For each CPU, set the buffer as used. */
+- smp_call_function_many(tracing_buffer_mask,
+- disable_trace_buffered_event, NULL, 1);
+- preempt_enable();
++ on_each_cpu_mask(tracing_buffer_mask, disable_trace_buffered_event,
++ NULL, true);
+
+ /* Wait for all current users to finish */
+ synchronize_rcu();
+@@ -2773,17 +2728,19 @@ void trace_buffered_event_disable(void)
+ free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
+ per_cpu(trace_buffered_event, cpu) = NULL;
+ }
++
+ /*
+- * Make sure trace_buffered_event is NULL before clearing
+- * trace_buffered_event_cnt.
++ * Wait for all CPUs that may have started checking whether they can use
++ * their event buffer only after the previous synchronize_rcu() call and
++ * that still read a valid pointer from trace_buffered_event. They must
++ * not see a cleared trace_buffered_event_cnt, else they could wrongly
++ * decide to use the pointed-to buffer, which is now freed.
+ */
+- smp_wmb();
++ synchronize_rcu();
+
+- preempt_disable();
+- /* Do the work on each cpu */
+- smp_call_function_many(tracing_buffer_mask,
+- enable_trace_buffered_event, NULL, 1);
+- preempt_enable();
++ /* For each CPU, relinquish the buffer */
++ on_each_cpu_mask(tracing_buffer_mask, enable_trace_buffered_event, NULL,
++ true);
+ }
+
+ static struct trace_buffer *temp_buffer;
+@@ -6258,6 +6215,15 @@ static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
+ per_cpu_ptr(buf->data, cpu)->entries = val;
+ }
+
++static void update_buffer_entries(struct array_buffer *buf, int cpu)
++{
++ if (cpu == RING_BUFFER_ALL_CPUS) {
++ set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0));
++ } else {
++ per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu);
++ }
++}
++
+ #ifdef CONFIG_TRACER_MAX_TRACE
+ /* resize @tr's buffer to the size of @size_tr's entries */
+ static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
+@@ -6302,13 +6268,15 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
+ if (!tr->array_buffer.buffer)
+ return 0;
+
++ /* Do not allow tracing while resizing the ring buffer */
++ tracing_stop_tr(tr);
++
+ ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
+ if (ret < 0)
+- return ret;
++ goto out_start;
+
+ #ifdef CONFIG_TRACER_MAX_TRACE
+- if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
+- !tr->current_trace->use_max_tr)
++ if (!tr->current_trace->use_max_tr)
+ goto out;
+
+ ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
+@@ -6333,22 +6301,17 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
+ WARN_ON(1);
+ tracing_disabled = 1;
+ }
+- return ret;
++ goto out_start;
+ }
+
+- if (cpu == RING_BUFFER_ALL_CPUS)
+- set_buffer_entries(&tr->max_buffer, size);
+- else
+- per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
++ update_buffer_entries(&tr->max_buffer, cpu);
+
+ out:
+ #endif /* CONFIG_TRACER_MAX_TRACE */
+
+- if (cpu == RING_BUFFER_ALL_CPUS)
+- set_buffer_entries(&tr->array_buffer, size);
+- else
+- per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
+-
++ update_buffer_entries(&tr->array_buffer, cpu);
++ out_start:
++ tracing_start_tr(tr);
+ return ret;
+ }
+
+diff --git a/lib/zstd/common/fse_decompress.c b/lib/zstd/common/fse_decompress.c
+index 2c8bbe3e4c148..f37b7aec088ec 100644
+--- a/lib/zstd/common/fse_decompress.c
++++ b/lib/zstd/common/fse_decompress.c
+@@ -312,7 +312,7 @@ size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size
+
+ typedef struct {
+ short ncount[FSE_MAX_SYMBOL_VALUE + 1];
+- FSE_DTable dtable[1]; /* Dynamically sized */
++ FSE_DTable dtable[]; /* Dynamically sized */
+ } FSE_DecompressWksp;
+
+
+diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
+index dbf5e4de97a0f..9ea21b6d266be 100644
+--- a/mm/damon/sysfs.c
++++ b/mm/damon/sysfs.c
+@@ -2210,7 +2210,7 @@ static int damon_sysfs_update_target(struct damon_target *target,
+ struct damon_ctx *ctx,
+ struct damon_sysfs_target *sys_target)
+ {
+- int err;
++ int err = 0;
+
+ if (damon_target_has_pid(ctx)) {
+ err = damon_sysfs_update_target_pid(target, sys_target->pid);
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 2d930470aacaa..d633ab8cd56f1 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -3319,7 +3319,7 @@ static bool filemap_map_pmd(struct vm_fault *vmf, struct page *page)
+ }
+ }
+
+- if (pmd_none(*vmf->pmd))
++ if (pmd_none(*vmf->pmd) && vmf->prealloc_pte)
+ pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);
+
+ /* See comment in handle_pte_fault() */
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index aa4a68dfb3b92..37288a7f0fa65 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1189,6 +1189,13 @@ static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
+ return (get_vma_private_data(vma) & flag) != 0;
+ }
+
++bool __vma_private_lock(struct vm_area_struct *vma)
++{
++ return !(vma->vm_flags & VM_MAYSHARE) &&
++ get_vma_private_data(vma) & ~HPAGE_RESV_MASK &&
++ is_vma_resv_set(vma, HPAGE_RESV_OWNER);
++}
++
+ void hugetlb_dup_vma_private(struct vm_area_struct *vma)
+ {
+ VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
+diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
+index f084a4a6b7ab2..8e0a90b45df22 100644
+--- a/net/core/drop_monitor.c
++++ b/net/core/drop_monitor.c
+@@ -181,7 +181,7 @@ out:
+ }
+
+ static const struct genl_multicast_group dropmon_mcgrps[] = {
+- { .name = "events", },
++ { .name = "events", .cap_sys_admin = 1 },
+ };
+
+ static void send_dm_alert(struct work_struct *work)
+@@ -1604,11 +1604,13 @@ static const struct genl_small_ops dropmon_ops[] = {
+ .cmd = NET_DM_CMD_START,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = net_dm_cmd_trace,
++ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = NET_DM_CMD_STOP,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = net_dm_cmd_trace,
++ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = NET_DM_CMD_CONFIG_GET,
+diff --git a/net/core/filter.c b/net/core/filter.c
+index adc327f4af1e9..3a6110ea4009f 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -2582,6 +2582,22 @@ BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes)
+ return 0;
+ }
+
++static void sk_msg_reset_curr(struct sk_msg *msg)
++{
++ u32 i = msg->sg.start;
++ u32 len = 0;
++
++ do {
++ len += sk_msg_elem(msg, i)->length;
++ sk_msg_iter_var_next(i);
++ if (len >= msg->sg.size)
++ break;
++ } while (i != msg->sg.end);
++
++ msg->sg.curr = i;
++ msg->sg.copybreak = 0;
++}
++
+ static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
+ .func = bpf_msg_cork_bytes,
+ .gpl_only = false,
+@@ -2701,6 +2717,7 @@ BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start,
+ msg->sg.end - shift + NR_MSG_FRAG_IDS :
+ msg->sg.end - shift;
+ out:
++ sk_msg_reset_curr(msg);
+ msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset;
+ msg->data_end = msg->data + bytes;
+ return 0;
+@@ -2837,6 +2854,7 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
+ msg->sg.data[new] = rsge;
+ }
+
++ sk_msg_reset_curr(msg);
+ sk_msg_compute_data_pointers(msg);
+ return 0;
+ }
+@@ -3005,6 +3023,7 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
+
+ sk_mem_uncharge(msg->sk, len - pop);
+ msg->sg.size -= (len - pop);
++ sk_msg_reset_curr(msg);
+ sk_msg_compute_data_pointers(msg);
+ return 0;
+ }
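Editor's note: sk_msg_reset_curr() re-derives the message cursor after a BPF helper has rewritten the scatter ring: it accumulates element lengths from start until the total message size is covered. A self-contained model of that walk, with a fixed-size array standing in for the sg ring:

#include <stdio.h>

#define NELEMS 5

struct msg {
	unsigned int lengths[NELEMS];
	unsigned int start, end;	/* ring indices */
	unsigned int size;		/* total bytes in the message */
	unsigned int curr;
};

/* Walk from start, summing element lengths, and leave curr pointing at
 * the first element past the accounted data (the kernel also resets
 * msg->sg.copybreak here).
 */
static void msg_reset_curr(struct msg *m)
{
	unsigned int i = m->start, len = 0;

	do {
		len += m->lengths[i];
		i = (i + 1) % NELEMS;
		if (len >= m->size)
			break;
	} while (i != m->end);

	m->curr = i;
}

int main(void)
{
	struct msg m = {
		.lengths = { 100, 200, 300, 0, 0 },
		.start = 0, .end = 3, .size = 600,
	};

	msg_reset_curr(&m);
	printf("curr = %u\n", m.curr);	/* 3: all 600 bytes accounted */
	return 0;
}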
+diff --git a/net/core/scm.c b/net/core/scm.c
+index acb7d776fa6ec..e762a4b8a1d22 100644
+--- a/net/core/scm.c
++++ b/net/core/scm.c
+@@ -26,6 +26,7 @@
+ #include <linux/nsproxy.h>
+ #include <linux/slab.h>
+ #include <linux/errqueue.h>
++#include <linux/io_uring.h>
+
+ #include <linux/uaccess.h>
+
+@@ -103,6 +104,11 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
+
+ if (fd < 0 || !(file = fget_raw(fd)))
+ return -EBADF;
++ /* don't allow io_uring files */
++ if (io_uring_get_socket(file)) {
++ fput(file);
++ return -EINVAL;
++ }
+ *fpp++ = file;
+ fpl->count++;
+ }
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index 5b8242265617d..d67d026d7f975 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -634,15 +634,18 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
+ }
+
+ if (dev->header_ops) {
++ int pull_len = tunnel->hlen + sizeof(struct iphdr);
++
+ if (skb_cow_head(skb, 0))
+ goto free_skb;
+
+ tnl_params = (const struct iphdr *)skb->data;
+
+- /* Pull skb since ip_tunnel_xmit() needs skb->data pointing
+- * to gre header.
+- */
+- skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
++ if (!pskb_network_may_pull(skb, pull_len))
++ goto free_skb;
++
++ /* ip_tunnel_xmit() needs skb->data pointing to gre header. */
++ skb_pull(skb, pull_len);
+ skb_reset_mac_header(skb);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 288678f17ccaf..58409ea2da0af 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -3473,9 +3473,25 @@ int tcp_set_window_clamp(struct sock *sk, int val)
+ return -EINVAL;
+ tp->window_clamp = 0;
+ } else {
+- tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
+- SOCK_MIN_RCVBUF / 2 : val;
+- tp->rcv_ssthresh = min(tp->rcv_wnd, tp->window_clamp);
++ u32 new_rcv_ssthresh, old_window_clamp = tp->window_clamp;
++ u32 new_window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
++ SOCK_MIN_RCVBUF / 2 : val;
++
++ if (new_window_clamp == old_window_clamp)
++ return 0;
++
++ tp->window_clamp = new_window_clamp;
++ if (new_window_clamp < old_window_clamp) {
++ /* need to apply the reserved mem provisioning only
++ * when shrinking the window clamp
++ */
++ __tcp_adjust_rcv_ssthresh(sk, tp->window_clamp);
++
++ } else {
++ new_rcv_ssthresh = min(tp->rcv_wnd, tp->window_clamp);
++ tp->rcv_ssthresh = max(new_rcv_ssthresh,
++ tp->rcv_ssthresh);
++ }
+ }
+ return 0;
+ }
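Editor's note: the tcp_set_window_clamp() fix treats shrink and grow differently: shrinking pulls rcv_ssthresh down with the new clamp, while growing only ever raises it. A simplified runnable model of that logic (it omits the reserved-memory provisioning that the real __tcp_adjust_rcv_ssthresh() applies, and SOCK_MIN_RCVBUF is an illustrative value):

#include <stdio.h>

#define SOCK_MIN_RCVBUF 2048	/* illustrative, not the kernel's value */

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }
static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

struct tcp_state {
	unsigned int window_clamp;
	unsigned int rcv_ssthresh;
	unsigned int rcv_wnd;
};

static void set_window_clamp(struct tcp_state *tp, unsigned int val)
{
	unsigned int new_clamp = max_u(val, SOCK_MIN_RCVBUF / 2);
	unsigned int old_clamp = tp->window_clamp;

	if (new_clamp == old_clamp)
		return;
	tp->window_clamp = new_clamp;
	if (new_clamp < old_clamp)
		/* shrink: the threshold must follow the clamp down */
		tp->rcv_ssthresh = min_u(tp->rcv_ssthresh, new_clamp);
	else
		/* grow: never lower an already-raised threshold */
		tp->rcv_ssthresh = max_u(tp->rcv_ssthresh,
					 min_u(tp->rcv_wnd, new_clamp));
}

int main(void)
{
	struct tcp_state tp = { .window_clamp = 65536,
				.rcv_ssthresh = 32768, .rcv_wnd = 40000 };

	set_window_clamp(&tp, 16384);
	printf("shrink: ssthresh=%u\n", tp.rcv_ssthresh);	/* 16384 */
	set_window_clamp(&tp, 65536);
	printf("grow:   ssthresh=%u\n", tp.rcv_ssthresh);	/* 40000 */
	return 0;
}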
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 65dae3d43684f..34460c9b37ae2 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3803,8 +3803,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
+ * then we can probably ignore it.
+ */
+ if (before(ack, prior_snd_una)) {
++ u32 max_window;
++
++ /* do not accept ACK for bytes we never sent. */
++ max_window = min_t(u64, tp->max_window, tp->bytes_acked);
+ /* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */
+- if (before(ack, prior_snd_una - tp->max_window)) {
++ if (before(ack, prior_snd_una - max_window)) {
+ if (!(flag & FLAG_NO_CHALLENGE_ACK))
+ tcp_send_challenge_ack(sk);
+ return -SKB_DROP_REASON_TCP_TOO_OLD_ACK;
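Editor's note: the tcp_ack() change caps the tolerated left-of-window distance by bytes_acked, so an ACK for bytes that were never sent is rejected even early in a connection. A small demo of the before() wraparound arithmetic with and without the cap:

#include <stdio.h>
#include <stdint.h>

/* TCP sequence comparison with 32-bit wraparound, as in the kernel. */
static int before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
	uint32_t prior_snd_una = 1000;
	uint32_t max_window = 65535;	/* largest window the peer offered */
	uint64_t bytes_acked = 500;	/* bytes actually acked so far */
	uint32_t ack = 400;		/* stale-looking ACK under test */

	/* Old check: tolerate ACKs up to max_window behind snd_una, which
	 * a blind attacker could satisfy early in a connection.
	 */
	int old_ok = !before(ack, prior_snd_una - max_window);

	/* Fixed check: never accept an ACK for bytes we never sent. */
	uint32_t win = (uint32_t)min_u64(max_window, bytes_acked);
	int new_ok = !before(ack, prior_snd_una - win);

	printf("old accepts: %d, fixed accepts: %d\n", old_ok, new_ok);
	return 0;	/* old accepts: 1, fixed accepts: 0 */
}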
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index eb6640f9a7921..1840735e9cb07 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -1502,13 +1502,9 @@ out:
+ if (!pn_leaf && !(pn->fn_flags & RTN_RTINFO)) {
+ pn_leaf = fib6_find_prefix(info->nl_net, table,
+ pn);
+-#if RT6_DEBUG >= 2
+- if (!pn_leaf) {
+- WARN_ON(!pn_leaf);
++ if (!pn_leaf)
+ pn_leaf =
+ info->nl_net->ipv6.fib6_null_entry;
+- }
+-#endif
+ fib6_info_hold(pn_leaf);
+ rcu_assign_pointer(pn->leaf, pn_leaf);
+ }
+diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
+index 20eede37d5228..d47dfdcb899b0 100644
+--- a/net/netfilter/ipset/ip_set_core.c
++++ b/net/netfilter/ipset/ip_set_core.c
+@@ -61,6 +61,8 @@ MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
+ ip_set_dereference((inst)->ip_set_list)[id]
+ #define ip_set_ref_netlink(inst,id) \
+ rcu_dereference_raw((inst)->ip_set_list)[id]
++#define ip_set_dereference_nfnl(p) \
++ rcu_dereference_check(p, lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET))
+
+ /* The set types are implemented in modules and registered set types
+ * can be found in ip_set_type_list. Adding/deleting types is
+@@ -708,15 +710,10 @@ __ip_set_put_netlink(struct ip_set *set)
+ static struct ip_set *
+ ip_set_rcu_get(struct net *net, ip_set_id_t index)
+ {
+- struct ip_set *set;
+ struct ip_set_net *inst = ip_set_pernet(net);
+
+- rcu_read_lock();
+- /* ip_set_list itself needs to be protected */
+- set = rcu_dereference(inst->ip_set_list)[index];
+- rcu_read_unlock();
+-
+- return set;
++ /* ip_set_list and the set pointer need to be protected */
++ return ip_set_dereference_nfnl(inst->ip_set_list)[index];
+ }
+
+ static inline void
+@@ -1399,6 +1396,9 @@ static int ip_set_swap(struct sk_buff *skb, const struct nfnl_info *info,
+ ip_set(inst, to_id) = from;
+ write_unlock_bh(&ip_set_ref_lock);
+
++ /* Make sure all readers of the old set pointers are completed. */
++ synchronize_rcu();
++
+ return 0;
+ }
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 421211eba838b..05fa5141af516 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -805,7 +805,7 @@ static struct nft_table *nft_table_lookup(const struct net *net,
+
+ static struct nft_table *nft_table_lookup_byhandle(const struct net *net,
+ const struct nlattr *nla,
+- u8 genmask, u32 nlpid)
++ int family, u8 genmask, u32 nlpid)
+ {
+ struct nftables_pernet *nft_net;
+ struct nft_table *table;
+@@ -813,6 +813,7 @@ static struct nft_table *nft_table_lookup_byhandle(const struct net *net,
+ nft_net = nft_pernet(net);
+ list_for_each_entry(table, &nft_net->tables, list) {
+ if (be64_to_cpu(nla_get_be64(nla)) == table->handle &&
++ table->family == family &&
+ nft_active_genmask(table, genmask)) {
+ if (nft_table_has_owner(table) &&
+ nlpid && table->nlpid != nlpid)
+@@ -1537,7 +1538,7 @@ static int nf_tables_deltable(struct sk_buff *skb, const struct nfnl_info *info,
+
+ if (nla[NFTA_TABLE_HANDLE]) {
+ attr = nla[NFTA_TABLE_HANDLE];
+- table = nft_table_lookup_byhandle(net, attr, genmask,
++ table = nft_table_lookup_byhandle(net, attr, family, genmask,
+ NETLINK_CB(skb).portid);
+ } else {
+ attr = nla[NFTA_TABLE_NAME];
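Editor's note: nft_table_lookup_byhandle() now also matches the table's family, so a delete requested for one family can no longer hit another family's table that happens to carry the matching handle. A toy lookup illustrating the added predicate (the NFPROTO values mirror the UAPI constants; everything else is hypothetical):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct table {
	uint64_t handle;
	int family;
	const char *name;
};

/* A lookup by handle on behalf of one family must not return another
 * family's table, which is what the added family comparison enforces.
 */
static const struct table *lookup_byhandle(const struct table *tbl, size_t n,
					   uint64_t handle, int family)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (tbl[i].handle == handle && tbl[i].family == family)
			return &tbl[i];
	return NULL;
}

int main(void)
{
	enum { NFPROTO_IPV4 = 2, NFPROTO_IPV6 = 10 };	/* UAPI values */
	const struct table tables[] = {
		{ .handle = 7, .family = NFPROTO_IPV4, .name = "filter4" },
		{ .handle = 7, .family = NFPROTO_IPV6, .name = "filter6" },
	};
	const struct table *t = lookup_byhandle(tables, 2, 7, NFPROTO_IPV6);

	printf("handle 7 in ipv6: %s\n", t ? t->name : "(none)"); /* filter6 */
	return 0;
}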
+diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
+index cf9a1ae87d9b1..a470e5f612843 100644
+--- a/net/netfilter/nft_dynset.c
++++ b/net/netfilter/nft_dynset.c
+@@ -279,10 +279,15 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
+ priv->expr_array[i] = dynset_expr;
+ priv->num_exprs++;
+
+- if (set->num_exprs &&
+- dynset_expr->ops != set->exprs[i]->ops) {
+- err = -EOPNOTSUPP;
+- goto err_expr_free;
++ if (set->num_exprs) {
++ if (i >= set->num_exprs) {
++ err = -EINVAL;
++ goto err_expr_free;
++ }
++ if (dynset_expr->ops != set->exprs[i]->ops) {
++ err = -EOPNOTSUPP;
++ goto err_expr_free;
++ }
+ }
+ i++;
+ }
+diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
+index efb50c2b41f32..de588f7b69c45 100644
+--- a/net/netfilter/nft_exthdr.c
++++ b/net/netfilter/nft_exthdr.c
+@@ -10,6 +10,7 @@
+ #include <linux/netlink.h>
+ #include <linux/netfilter.h>
+ #include <linux/netfilter/nf_tables.h>
++#include <linux/dccp.h>
+ #include <linux/sctp.h>
+ #include <net/netfilter/nf_tables_core.h>
+ #include <net/netfilter/nf_tables.h>
+@@ -214,7 +215,7 @@ static void nft_exthdr_tcp_eval(const struct nft_expr *expr,
+
+ offset = i + priv->offset;
+ if (priv->flags & NFT_EXTHDR_F_PRESENT) {
+- *dest = 1;
++ nft_reg_store8(dest, 1);
+ } else {
+ if (priv->len % NFT_REG32_SIZE)
+ dest[priv->len / NFT_REG32_SIZE] = 0;
+@@ -409,6 +410,82 @@ err:
+ regs->verdict.code = NFT_BREAK;
+ }
+
++static void nft_exthdr_dccp_eval(const struct nft_expr *expr,
++ struct nft_regs *regs,
++ const struct nft_pktinfo *pkt)
++{
++ struct nft_exthdr *priv = nft_expr_priv(expr);
++ unsigned int thoff, dataoff, optoff, optlen, i;
++ u32 *dest = &regs->data[priv->dreg];
++ const struct dccp_hdr *dh;
++ struct dccp_hdr _dh;
++
++ if (pkt->tprot != IPPROTO_DCCP || pkt->fragoff)
++ goto err;
++
++ thoff = nft_thoff(pkt);
++
++ dh = skb_header_pointer(pkt->skb, thoff, sizeof(_dh), &_dh);
++ if (!dh)
++ goto err;
++
++ dataoff = dh->dccph_doff * sizeof(u32);
++ optoff = __dccp_hdr_len(dh);
++ if (dataoff <= optoff)
++ goto err;
++
++ optlen = dataoff - optoff;
++
++ for (i = 0; i < optlen; ) {
++ /* Options 0 (DCCPO_PADDING) - 31 (DCCPO_MAX_RESERVED) are 1B in
++ * length; the remaining options are at least 2B long. In
++ * all cases, the first byte contains the option type. In
++ * multi-byte options, the second byte contains the option
++ * length, which must be at least two: 1 for the type plus 1 for
++ * the length plus 0-253 for any following option data. We
++ * aren't interested in the option data, only the type and the
++ * length, so we don't need to read more than two bytes at a
++ * time.
++ */
++ unsigned int buflen = optlen - i;
++ u8 buf[2], *bufp;
++ u8 type, len;
++
++ if (buflen > sizeof(buf))
++ buflen = sizeof(buf);
++
++ bufp = skb_header_pointer(pkt->skb, thoff + optoff + i, buflen,
++ &buf);
++ if (!bufp)
++ goto err;
++
++ type = bufp[0];
++
++ if (type == priv->type) {
++ nft_reg_store8(dest, 1);
++ return;
++ }
++
++ if (type <= DCCPO_MAX_RESERVED) {
++ i++;
++ continue;
++ }
++
++ if (buflen < 2)
++ goto err;
++
++ len = bufp[1];
++
++ if (len < 2)
++ goto err;
++
++ i += len;
++ }
++
++err:
++ *dest = 0;
++}
++
+ static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = {
+ [NFTA_EXTHDR_DREG] = { .type = NLA_U32 },
+ [NFTA_EXTHDR_TYPE] = { .type = NLA_U8 },
+@@ -560,6 +637,22 @@ static int nft_exthdr_ipv4_init(const struct nft_ctx *ctx,
+ return 0;
+ }
+
++static int nft_exthdr_dccp_init(const struct nft_ctx *ctx,
++ const struct nft_expr *expr,
++ const struct nlattr * const tb[])
++{
++ struct nft_exthdr *priv = nft_expr_priv(expr);
++ int err = nft_exthdr_init(ctx, expr, tb);
++
++ if (err < 0)
++ return err;
++
++ if (!(priv->flags & NFT_EXTHDR_F_PRESENT))
++ return -EOPNOTSUPP;
++
++ return 0;
++}
++
+ static int nft_exthdr_dump_common(struct sk_buff *skb, const struct nft_exthdr *priv)
+ {
+ if (nla_put_u8(skb, NFTA_EXTHDR_TYPE, priv->type))
+@@ -686,6 +779,15 @@ static const struct nft_expr_ops nft_exthdr_sctp_ops = {
+ .reduce = nft_exthdr_reduce,
+ };
+
++static const struct nft_expr_ops nft_exthdr_dccp_ops = {
++ .type = &nft_exthdr_type,
++ .size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
++ .eval = nft_exthdr_dccp_eval,
++ .init = nft_exthdr_dccp_init,
++ .dump = nft_exthdr_dump,
++ .reduce = nft_exthdr_reduce,
++};
++
+ static const struct nft_expr_ops *
+ nft_exthdr_select_ops(const struct nft_ctx *ctx,
+ const struct nlattr * const tb[])
+@@ -720,6 +822,10 @@ nft_exthdr_select_ops(const struct nft_ctx *ctx,
+ if (tb[NFTA_EXTHDR_DREG])
+ return &nft_exthdr_sctp_ops;
+ break;
++ case NFT_EXTHDR_OP_DCCP:
++ if (tb[NFTA_EXTHDR_DREG])
++ return &nft_exthdr_dccp_ops;
++ break;
+ }
+
+ return ERR_PTR(-EOPNOTSUPP);
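Editor's note: the DCCP evaluator walks the options area using the protocol's length rules: option types 0-31 occupy a single byte, and every other option carries a length byte that must be at least 2. A standalone parser sketch over a byte buffer (userspace C, not the nft code, which additionally bounds every read via skb_header_pointer()):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define DCCPO_MAX_RESERVED 31	/* types 0-31 are single-byte options */

/* Report whether option 'want' is present in a DCCP options area,
 * following the same length rules as nft_exthdr_dccp_eval().
 */
static int dccp_opt_present(const uint8_t *opt, size_t optlen, uint8_t want)
{
	size_t i = 0;

	while (i < optlen) {
		uint8_t type = opt[i];

		if (type == want)
			return 1;
		if (type <= DCCPO_MAX_RESERVED) {	/* 1-byte option */
			i++;
			continue;
		}
		if (i + 1 >= optlen)			/* truncated */
			return 0;
		if (opt[i + 1] < 2)			/* bogus length */
			return 0;
		i += opt[i + 1];
	}
	return 0;
}

int main(void)
{
	/* padding(0), a 4-byte option of type 42, a 1-byte option type 2 */
	uint8_t opts[] = { 0, 42, 4, 0xde, 0xad, 2 };

	printf("type 42: %d\n", dccp_opt_present(opts, sizeof(opts), 42)); /* 1 */
	printf("type 7:  %d\n", dccp_opt_present(opts, sizeof(opts), 7));  /* 0 */
	return 0;
}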
+diff --git a/net/netfilter/nft_fib.c b/net/netfilter/nft_fib.c
+index 1f12d7ade606c..5748415f74d0b 100644
+--- a/net/netfilter/nft_fib.c
++++ b/net/netfilter/nft_fib.c
+@@ -144,11 +144,15 @@ void nft_fib_store_result(void *reg, const struct nft_fib *priv,
+ switch (priv->result) {
+ case NFT_FIB_RESULT_OIF:
+ index = dev ? dev->ifindex : 0;
+- *dreg = (priv->flags & NFTA_FIB_F_PRESENT) ? !!index : index;
++ if (priv->flags & NFTA_FIB_F_PRESENT)
++ nft_reg_store8(dreg, !!index);
++ else
++ *dreg = index;
++
+ break;
+ case NFT_FIB_RESULT_OIFNAME:
+ if (priv->flags & NFTA_FIB_F_PRESENT)
+- *dreg = !!dev;
++ nft_reg_store8(dreg, !!dev);
+ else
+ strncpy(reg, dev ? dev->name : "", IFNAMSIZ);
+ break;
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index deea6196d9925..4e1cc31729b80 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -2042,6 +2042,9 @@ static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set,
+
+ e = f->mt[r].e;
+
++ if (!nft_set_elem_active(&e->ext, iter->genmask))
++ goto cont;
++
+ elem.priv = e;
+
+ iter->err = iter->fn(ctx, set, iter, &elem);
+diff --git a/net/netfilter/xt_owner.c b/net/netfilter/xt_owner.c
+index e85ce69924aee..50332888c8d23 100644
+--- a/net/netfilter/xt_owner.c
++++ b/net/netfilter/xt_owner.c
+@@ -76,18 +76,23 @@ owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
+ */
+ return false;
+
+- filp = sk->sk_socket->file;
+- if (filp == NULL)
++ read_lock_bh(&sk->sk_callback_lock);
++ filp = sk->sk_socket ? sk->sk_socket->file : NULL;
++ if (filp == NULL) {
++ read_unlock_bh(&sk->sk_callback_lock);
+ return ((info->match ^ info->invert) &
+ (XT_OWNER_UID | XT_OWNER_GID)) == 0;
++ }
+
+ if (info->match & XT_OWNER_UID) {
+ kuid_t uid_min = make_kuid(net->user_ns, info->uid_min);
+ kuid_t uid_max = make_kuid(net->user_ns, info->uid_max);
+ if ((uid_gte(filp->f_cred->fsuid, uid_min) &&
+ uid_lte(filp->f_cred->fsuid, uid_max)) ^
+- !(info->invert & XT_OWNER_UID))
++ !(info->invert & XT_OWNER_UID)) {
++ read_unlock_bh(&sk->sk_callback_lock);
+ return false;
++ }
+ }
+
+ if (info->match & XT_OWNER_GID) {
+@@ -112,10 +117,13 @@ owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
+ }
+ }
+
+- if (match ^ !(info->invert & XT_OWNER_GID))
++ if (match ^ !(info->invert & XT_OWNER_GID)) {
++ read_unlock_bh(&sk->sk_callback_lock);
+ return false;
++ }
+ }
+
++ read_unlock_bh(&sk->sk_callback_lock);
+ return true;
+ }
+
+diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
+index 3e16527beb914..505d3b910cc29 100644
+--- a/net/netlink/genetlink.c
++++ b/net/netlink/genetlink.c
+@@ -1438,6 +1438,9 @@ static int genl_bind(struct net *net, int group)
+ if ((grp->flags & GENL_UNS_ADMIN_PERM) &&
+ !ns_capable(net->user_ns, CAP_NET_ADMIN))
+ ret = -EPERM;
++ if (grp->cap_sys_admin &&
++ !ns_capable(net->user_ns, CAP_SYS_ADMIN))
++ ret = -EPERM;
+
+ break;
+ }
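+
+With the extra genl_bind() check above, a generic netlink family can restrict joining one of its multicast groups to CAP_SYS_ADMIN holders, checked in the user namespace that owns the socket's network namespace, by setting the group's cap_sys_admin bit. A hedged sketch, with a hypothetical group name:
+
+	static const struct genl_multicast_group my_mcgrps[] = {
+		/* Joining this group now requires CAP_SYS_ADMIN. */
+		{ .name = "events", .cap_sys_admin = 1 },
+	};
+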
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 451bd8bfafd23..51882f07ef70c 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -4275,7 +4275,7 @@ static void packet_mm_open(struct vm_area_struct *vma)
+ struct sock *sk = sock->sk;
+
+ if (sk)
+- atomic_inc(&pkt_sk(sk)->mapped);
++ atomic_long_inc(&pkt_sk(sk)->mapped);
+ }
+
+ static void packet_mm_close(struct vm_area_struct *vma)
+@@ -4285,7 +4285,7 @@ static void packet_mm_close(struct vm_area_struct *vma)
+ struct sock *sk = sock->sk;
+
+ if (sk)
+- atomic_dec(&pkt_sk(sk)->mapped);
++ atomic_long_dec(&pkt_sk(sk)->mapped);
+ }
+
+ static const struct vm_operations_struct packet_mmap_ops = {
+@@ -4380,7 +4380,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+
+ err = -EBUSY;
+ if (!closing) {
+- if (atomic_read(&po->mapped))
++ if (atomic_long_read(&po->mapped))
+ goto out;
+ if (packet_read_pending(rb))
+ goto out;
+@@ -4483,7 +4483,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+
+ err = -EBUSY;
+ mutex_lock(&po->pg_vec_lock);
+- if (closing || atomic_read(&po->mapped) == 0) {
++ if (closing || atomic_long_read(&po->mapped) == 0) {
+ err = 0;
+ spin_lock_bh(&rb_queue->lock);
+ swap(rb->pg_vec, pg_vec);
+@@ -4501,9 +4501,9 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ po->prot_hook.func = (po->rx_ring.pg_vec) ?
+ tpacket_rcv : packet_rcv;
+ skb_queue_purge(rb_queue);
+- if (atomic_read(&po->mapped))
+- pr_err("packet_mmap: vma is busy: %d\n",
+- atomic_read(&po->mapped));
++ if (atomic_long_read(&po->mapped))
++ pr_err("packet_mmap: vma is busy: %ld\n",
++ atomic_long_read(&po->mapped));
+ }
+ mutex_unlock(&po->pg_vec_lock);
+
+@@ -4581,7 +4581,7 @@ static int packet_mmap(struct file *file, struct socket *sock,
+ }
+ }
+
+- atomic_inc(&po->mapped);
++ atomic_long_inc(&po->mapped);
+ vma->vm_ops = &packet_mmap_ops;
+ err = 0;
+
+diff --git a/net/packet/internal.h b/net/packet/internal.h
+index 3bae8ea7a36f5..b2edfe6fc8e77 100644
+--- a/net/packet/internal.h
++++ b/net/packet/internal.h
+@@ -126,7 +126,7 @@ struct packet_sock {
+ __be16 num;
+ struct packet_rollover *rollover;
+ struct packet_mclist *mclist;
+- atomic_t mapped;
++ atomic_long_t mapped;
+ enum tpacket_versions tp_version;
+ unsigned int tp_hdrlen;
+ unsigned int tp_reserve;
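+
+The af_packet hunks widen po->mapped because atomic_t is 32 bits even on 64-bit kernels: enough mmap() calls on one packet socket can wrap the count back to zero, letting packet_set_ring() free ring pages that are still mapped. A standalone userspace sketch of the wrap that the switch to atomic_long_t rules out in practice:
+
+	#include <stdint.h>
+	#include <stdio.h>
+
+	int main(void)
+	{
+		uint32_t mapped = UINT32_MAX;    /* ~2^32 packet_mm_open() calls */
+		mapped++;                        /* wraps, as atomic_inc() on atomic_t would */
+		printf("mapped = %u\n", mapped); /* prints 0: ring looks unmapped */
+		return 0;
+	}
+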
+diff --git a/net/psample/psample.c b/net/psample/psample.c
+index 81a794e36f535..c34e902855dbe 100644
+--- a/net/psample/psample.c
++++ b/net/psample/psample.c
+@@ -31,7 +31,8 @@ enum psample_nl_multicast_groups {
+
+ static const struct genl_multicast_group psample_nl_mcgrps[] = {
+ [PSAMPLE_NL_MCGRP_CONFIG] = { .name = PSAMPLE_NL_MCGRP_CONFIG_NAME },
+- [PSAMPLE_NL_MCGRP_SAMPLE] = { .name = PSAMPLE_NL_MCGRP_SAMPLE_NAME },
++ [PSAMPLE_NL_MCGRP_SAMPLE] = { .name = PSAMPLE_NL_MCGRP_SAMPLE_NAME,
++ .flags = GENL_UNS_ADMIN_PERM },
+ };
+
+ static struct genl_family psample_nl_family __ro_after_init;
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index f7592638e61d3..5c8e02d56fd43 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -722,7 +722,7 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
+
+ rcu_read_lock();
+ if (xsk_check_common(xs))
+- goto skip_tx;
++ goto out;
+
+ pool = xs->pool;
+
+@@ -734,12 +734,11 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
+ xsk_generic_xmit(sk);
+ }
+
+-skip_tx:
+ if (xs->rx && !xskq_prod_is_empty(xs->rx))
+ mask |= EPOLLIN | EPOLLRDNORM;
+ if (xs->tx && xsk_tx_writeable(xs))
+ mask |= EPOLLOUT | EPOLLWRNORM;
+-
++out:
+ rcu_read_unlock();
+ return mask;
+ }
+diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl
+index d48dfed6d3dba..a0f9101d5fd44 100755
+--- a/scripts/checkstack.pl
++++ b/scripts/checkstack.pl
+@@ -146,15 +146,11 @@ $total_size = 0;
+ while (my $line = <STDIN>) {
+ if ($line =~ m/$funcre/) {
+ $func = $1;
+- next if $line !~ m/^($xs*)/;
++ next if $line !~ m/^($x*)/;
+ if ($total_size > $min_stack) {
+ push @stack, "$intro$total_size\n";
+ }
+-
+- $addr = $1;
+- $addr =~ s/ /0/g;
+- $addr = "0x$addr";
+-
++ $addr = "0x$1";
+ $intro = "$addr $func [$file]:";
+ my $padlen = 56 - length($intro);
+ while ($padlen > 0) {
+diff --git a/scripts/dtc/dt-extract-compatibles b/scripts/dtc/dt-extract-compatibles
+index a1119762ed086..9686a1cf85498 100755
+--- a/scripts/dtc/dt-extract-compatibles
++++ b/scripts/dtc/dt-extract-compatibles
+@@ -1,8 +1,8 @@
+ #!/usr/bin/env python3
+ # SPDX-License-Identifier: GPL-2.0-only
+
++import fnmatch
+ import os
+-import glob
+ import re
+ import argparse
+
+@@ -49,6 +49,24 @@ def print_compat(filename, compatibles):
+ else:
+ print(*compatibles, sep='\n')
+
++def glob_without_symlinks(root, glob):
++ for path, dirs, files in os.walk(root):
++ # Ignore hidden directories
++ # Prune in place (os.walk() honors in-place edits of dirs);
++ # removing entries while iterating the list would skip elements.
++ dirs[:] = [d for d in dirs if not fnmatch.fnmatch(d, ".*")]
++ for f in files:
++ if fnmatch.fnmatch(f, glob):
++ yield os.path.join(path, f)
++
++def files_to_parse(path_args):
++ for f in path_args:
++ if os.path.isdir(f):
++ for filename in glob_without_symlinks(f, "*.c"):
++ yield filename
++ else:
++ yield f
++
+ show_filename = False
+
+ if __name__ == "__main__":
+@@ -59,11 +77,6 @@ if __name__ == "__main__":
+
+ show_filename = args.with_filename
+
+- for f in args.cfile:
+- if os.path.isdir(f):
+- for filename in glob.iglob(f + "/**/*.c", recursive=True):
+- compat_list = parse_compatibles(filename)
+- print_compat(filename, compat_list)
+- else:
+- compat_list = parse_compatibles(f)
+- print_compat(f, compat_list)
++ for f in files_to_parse(args.cfile):
++ compat_list = parse_compatibles(f)
++ print_compat(f, compat_list)
+diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
+index 0572330bf8a78..a76925b46ce63 100644
+--- a/scripts/kconfig/symbol.c
++++ b/scripts/kconfig/symbol.c
+@@ -122,9 +122,9 @@ static long long sym_get_range_val(struct symbol *sym, int base)
+ static void sym_validate_range(struct symbol *sym)
+ {
+ struct property *prop;
++ struct symbol *range_sym;
+ int base;
+ long long val, val2;
+- char str[64];
+
+ switch (sym->type) {
+ case S_INT:
+@@ -140,17 +140,15 @@ static void sym_validate_range(struct symbol *sym)
+ if (!prop)
+ return;
+ val = strtoll(sym->curr.val, NULL, base);
+- val2 = sym_get_range_val(prop->expr->left.sym, base);
++ range_sym = prop->expr->left.sym;
++ val2 = sym_get_range_val(range_sym, base);
+ if (val >= val2) {
+- val2 = sym_get_range_val(prop->expr->right.sym, base);
++ range_sym = prop->expr->right.sym;
++ val2 = sym_get_range_val(range_sym, base);
+ if (val <= val2)
+ return;
+ }
+- if (sym->type == S_INT)
+- sprintf(str, "%lld", val2);
+- else
+- sprintf(str, "0x%llx", val2);
+- sym->curr.val = xstrdup(str);
++ sym->curr.val = range_sym->curr.val;
+ }
+
+ static void sym_set_changed(struct symbol *sym)
+diff --git a/sound/core/pcm.c b/sound/core/pcm.c
+index 9d95e37311230..2415a3c3ac6c9 100644
+--- a/sound/core/pcm.c
++++ b/sound/core/pcm.c
+@@ -253,6 +253,7 @@ static const char * const snd_pcm_state_names[] = {
+ STATE(DRAINING),
+ STATE(PAUSED),
+ STATE(SUSPENDED),
++ STATE(DISCONNECTED),
+ };
+
+ static const char * const snd_pcm_access_names[] = {
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index d1944c83b03a2..c6cae3369a6a1 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10031,6 +10031,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
+ SND_PCI_QUIRK(0x8086, 0x3038, "Intel NUC 13", ALC295_FIXUP_CHROME_BOOK),
+ SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0xf111, 0x0005, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0xf111, 0x0006, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
+
+ #if 0
+ /* Below is a quirk table taken from the old code.
+@@ -11952,6 +11954,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x32f7, "Lenovo ThinkCentre M90", ALC897_FIXUP_HEADSET_MIC_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x3321, "Lenovo ThinkCentre M70 Gen4", ALC897_FIXUP_HEADSET_MIC_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x331b, "Lenovo ThinkCentre M90 Gen4", ALC897_FIXUP_HEADSET_MIC_PIN),
++ SND_PCI_QUIRK(0x17aa, 0x3364, "Lenovo ThinkCentre M90 Gen5", ALC897_FIXUP_HEADSET_MIC_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x3742, "Lenovo TianYi510Pro-14IOB", ALC897_FIXUP_HEADSET_MIC_PIN2),
+ SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
+ SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index c494de5f5c066..1dde1f3196acc 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -283,6 +283,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "M6500RC"),
+ }
+ },
++ {
++ .driver_data = &acp6x_card,
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "E1504FA"),
++ }
++ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+diff --git a/sound/soc/codecs/lpass-tx-macro.c b/sound/soc/codecs/lpass-tx-macro.c
+index 9f59518005a5f..840bbe991cd3a 100644
+--- a/sound/soc/codecs/lpass-tx-macro.c
++++ b/sound/soc/codecs/lpass-tx-macro.c
+@@ -1871,6 +1871,11 @@ static int tx_macro_probe(struct platform_device *pdev)
+
+ tx->dev = dev;
+
++ /* Set active_decimator default values */
++ tx->active_decimator[TX_MACRO_AIF1_CAP] = -1;
++ tx->active_decimator[TX_MACRO_AIF2_CAP] = -1;
++ tx->active_decimator[TX_MACRO_AIF3_CAP] = -1;
++
+ /* set MCLK and NPL rates */
+ clk_set_rate(tx->mclk, MCLK_FREQ);
+ clk_set_rate(tx->npl, MCLK_FREQ);
+diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
+index 8a2e9771bb50e..2cfca78f0401f 100644
+--- a/sound/soc/codecs/wm_adsp.c
++++ b/sound/soc/codecs/wm_adsp.c
+@@ -1401,12 +1401,12 @@ static int wm_adsp_buffer_populate(struct wm_adsp_compr_buf *buf)
+ ret = wm_adsp_buffer_read(buf, caps->region_defs[i].base_offset,
+ &region->base_addr);
+ if (ret < 0)
+- return ret;
++ goto err;
+
+ ret = wm_adsp_buffer_read(buf, caps->region_defs[i].size_offset,
+ &offset);
+ if (ret < 0)
+- return ret;
++ goto err;
+
+ region->cumulative_size = offset;
+
+@@ -1417,6 +1417,10 @@ static int wm_adsp_buffer_populate(struct wm_adsp_compr_buf *buf)
+ }
+
+ return 0;
++
++err:
++ kfree(buf->regions);
++ return ret;
+ }
+
+ static void wm_adsp_buffer_clear(struct wm_adsp_compr_buf *buf)
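+
+The wm_adsp hunk applies the usual kernel goto-cleanup idiom: once buf->regions is allocated, every later failure path must free it, so mid-loop errors now jump to a single err label instead of returning directly. A minimal sketch of the idiom, with hypothetical names:
+
+	int setup_regions(struct ctx *c, size_t n)
+	{
+		int ret;
+
+		c->regions = kcalloc(n, sizeof(*c->regions), GFP_KERNEL);
+		if (!c->regions)
+			return -ENOMEM;
+
+		ret = fill_regions(c);          /* hypothetical helper */
+		if (ret < 0)
+			goto err;               /* single exit frees the buffer */
+		return 0;
+
+	err:
+		kfree(c->regions);
+		c->regions = NULL;
+		return ret;
+	}
+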
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index 96fd9095e544b..6364d9be28fbb 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -674,6 +674,20 @@ static int fsl_sai_hw_params(struct snd_pcm_substream *substream,
+ FSL_SAI_CR3_TRCE_MASK,
+ FSL_SAI_CR3_TRCE((dl_cfg[dl_cfg_idx].mask[tx] & trce_mask)));
+
++ /*
++ * If TERE and FSD_MSTR are already enabled when the word width is
++ * configured, the "no frame sync clock" issue appears, because the
++ * word width affects how the frame sync clock is generated.
++ *
++ * TERE is enabled early only on i.MX8MP, due to a hardware limitation,
++ * so for that specific case FSD_MSTR must be disabled before the word
++ * width is configured and re-enabled afterwards.
++ */
++ if (sai->soc_data->mclk_with_tere && sai->mclk_direction_output &&
++ !sai->is_consumer_mode)
++ regmap_update_bits(sai->regmap, FSL_SAI_xCR4(tx, ofs),
++ FSL_SAI_CR4_FSD_MSTR, 0);
++
+ regmap_update_bits(sai->regmap, FSL_SAI_xCR4(tx, ofs),
+ FSL_SAI_CR4_SYWD_MASK | FSL_SAI_CR4_FRSZ_MASK |
+ FSL_SAI_CR4_CHMOD_MASK,
+@@ -681,6 +695,13 @@ static int fsl_sai_hw_params(struct snd_pcm_substream *substream,
+ regmap_update_bits(sai->regmap, FSL_SAI_xCR5(tx, ofs),
+ FSL_SAI_CR5_WNW_MASK | FSL_SAI_CR5_W0W_MASK |
+ FSL_SAI_CR5_FBT_MASK, val_cr5);
++
++ /* Enable FSD_MSTR after configuring word width */
++ if (sai->soc_data->mclk_with_tere && sai->mclk_direction_output &&
++ !sai->is_consumer_mode)
++ regmap_update_bits(sai->regmap, FSL_SAI_xCR4(tx, ofs),
++ FSL_SAI_CR4_FSD_MSTR, FSL_SAI_CR4_FSD_MSTR);
++
+ regmap_write(sai->regmap, FSL_SAI_xMR(tx),
+ ~0UL - ((1 << min(channels, slots)) - 1));
+
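+In compact form, this is the ordering the two fsl_sai hunks enforce on i.MX8MP when the MCLK is an output and TERE is already set, sketched with hypothetical mask names around the real regmap_update_bits(map, reg, mask, val) helper:
+
+	regmap_update_bits(map, CR4_REG, FSD_MSTR_BIT, 0);            /* drop master mode */
+	regmap_update_bits(map, CR4_REG, WORD_WIDTH_MASK, width_val); /* safe to program now */
+	regmap_update_bits(map, CR4_REG, FSD_MSTR_BIT, FSD_MSTR_BIT); /* restore master mode */
+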
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index ab0d459f42715..1f32e3ae3aa31 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -2978,6 +2978,7 @@ static int snd_bbfpro_controls_create(struct usb_mixer_interface *mixer)
+ #define SND_DJM_850_IDX 0x2
+ #define SND_DJM_900NXS2_IDX 0x3
+ #define SND_DJM_750MK2_IDX 0x4
++#define SND_DJM_450_IDX 0x5
+
+
+ #define SND_DJM_CTL(_name, suffix, _default_value, _windex) { \
+@@ -3108,6 +3109,31 @@ static const struct snd_djm_ctl snd_djm_ctls_250mk2[] = {
+ };
+
+
++// DJM-450
++static const u16 snd_djm_opts_450_cap1[] = {
++ 0x0103, 0x0100, 0x0106, 0x0107, 0x0108, 0x0109, 0x010d, 0x010a };
++
++static const u16 snd_djm_opts_450_cap2[] = {
++ 0x0203, 0x0200, 0x0206, 0x0207, 0x0208, 0x0209, 0x020d, 0x020a };
++
++static const u16 snd_djm_opts_450_cap3[] = {
++ 0x030a, 0x0311, 0x0312, 0x0307, 0x0308, 0x0309, 0x030d };
++
++static const u16 snd_djm_opts_450_pb1[] = { 0x0100, 0x0101, 0x0104 };
++static const u16 snd_djm_opts_450_pb2[] = { 0x0200, 0x0201, 0x0204 };
++static const u16 snd_djm_opts_450_pb3[] = { 0x0300, 0x0301, 0x0304 };
++
++static const struct snd_djm_ctl snd_djm_ctls_450[] = {
++ SND_DJM_CTL("Capture Level", cap_level, 0, SND_DJM_WINDEX_CAPLVL),
++ SND_DJM_CTL("Ch1 Input", 450_cap1, 2, SND_DJM_WINDEX_CAP),
++ SND_DJM_CTL("Ch2 Input", 450_cap2, 2, SND_DJM_WINDEX_CAP),
++ SND_DJM_CTL("Ch3 Input", 450_cap3, 0, SND_DJM_WINDEX_CAP),
++ SND_DJM_CTL("Ch1 Output", 450_pb1, 0, SND_DJM_WINDEX_PB),
++ SND_DJM_CTL("Ch2 Output", 450_pb2, 1, SND_DJM_WINDEX_PB),
++ SND_DJM_CTL("Ch3 Output", 450_pb3, 2, SND_DJM_WINDEX_PB)
++};
++
++
+ // DJM-750
+ static const u16 snd_djm_opts_750_cap1[] = {
+ 0x0101, 0x0103, 0x0106, 0x0107, 0x0108, 0x0109, 0x010a, 0x010f };
+@@ -3203,6 +3229,7 @@ static const struct snd_djm_device snd_djm_devices[] = {
+ [SND_DJM_850_IDX] = SND_DJM_DEVICE(850),
+ [SND_DJM_900NXS2_IDX] = SND_DJM_DEVICE(900nxs2),
+ [SND_DJM_750MK2_IDX] = SND_DJM_DEVICE(750mk2),
++ [SND_DJM_450_IDX] = SND_DJM_DEVICE(450),
+ };
+
+
+@@ -3449,6 +3476,9 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
+ case USB_ID(0x2b73, 0x0017): /* Pioneer DJ DJM-250MK2 */
+ err = snd_djm_controls_create(mixer, SND_DJM_250MK2_IDX);
+ break;
++ case USB_ID(0x2b73, 0x0013): /* Pioneer DJ DJM-450 */
++ err = snd_djm_controls_create(mixer, SND_DJM_450_IDX);
++ break;
+ case USB_ID(0x08e4, 0x017f): /* Pioneer DJ DJM-750 */
+ err = snd_djm_controls_create(mixer, SND_DJM_750_IDX);
+ break;