author    Mike Pagano <mpagano@gentoo.org>  2023-11-28 12:50:28 -0500
committer Mike Pagano <mpagano@gentoo.org>  2023-11-28 12:50:28 -0500
commit    35b0948c626964e560310987774c01a06d3282f9 (patch)
tree      4521d8861ceab79513d8403864cf8e7c192f7ff6
parent    Linux patch 6.5.12 (diff)
download  linux-patches-35b0948c626964e560310987774c01a06d3282f9.tar.gz
          linux-patches-35b0948c626964e560310987774c01a06d3282f9.tar.bz2
          linux-patches-35b0948c626964e560310987774c01a06d3282f9.zip

Linux patch 6.5.13 (tag: 6.5-15)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README             |     4
-rw-r--r--  1012_linux-6.5.13.patch | 20729
2 files changed, 20733 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index fd46c9a5..12ba051a 100644
--- a/0000_README
+++ b/0000_README
@@ -91,6 +91,10 @@ Patch: 1011_linux-6.5.12.patch
From: https://www.kernel.org
Desc: Linux 6.5.12
+Patch: 1012_linux-6.5.13.patch
+From: https://www.kernel.org
+Desc: Linux 6.5.13
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1012_linux-6.5.13.patch b/1012_linux-6.5.13.patch
new file mode 100644
index 00000000..f79c48fd
--- /dev/null
+++ b/1012_linux-6.5.13.patch
@@ -0,0 +1,20729 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 23ebe34ff901e..5711129686d10 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2938,6 +2938,10 @@
+ locktorture.torture_type= [KNL]
+ Specify the locking implementation to test.
+
++ locktorture.writer_fifo= [KNL]
++ Run the write-side locktorture kthreads at
++ sched_set_fifo() real-time priority.
++
+ locktorture.verbose= [KNL]
+ Enable additional printk() statements.
+
+@@ -5781,6 +5785,13 @@
+ This feature may be more efficiently disabled
+ using the csdlock_debug- kernel parameter.
+
++ smp.panic_on_ipistall= [KNL]
++ If a csd_lock_timeout extends for more than
++ the specified number of milliseconds, panic the
++ system. By default, let CSD-lock acquisitions
++ take as long as they take. Specifying 300,000
++ for this value provides a 5-minute timeout.
++
+ smsc-ircc2.nopnp [HW] Don't use PNP to discover SMC devices
+ smsc-ircc2.ircc_cfg= [HW] Device configuration I/O port
+ smsc-ircc2.ircc_sir= [HW] SIR base I/O port
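As a usage sketch, not part of the patch: the two switches documented above are boot-time kernel parameters, so a hypothetical command line exercising both, assuming the existing mutex_lock torture type and the 5-minute timeout suggested in the text, would be:

    locktorture.torture_type=mutex_lock locktorture.writer_fifo=1 smp.panic_on_ipistall=300000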
+diff --git a/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml b/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
+index 083fda530b484..828650d4c4b09 100644
+--- a/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
++++ b/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
+@@ -27,6 +27,27 @@ properties:
+
+ vdd3-supply: true
+
++ qcom,tune-usb2-disc-thres:
++ $ref: /schemas/types.yaml#/definitions/uint8
++ description: High-Speed disconnect threshold
++ minimum: 0
++ maximum: 7
++ default: 0
++
++ qcom,tune-usb2-amplitude:
++ $ref: /schemas/types.yaml#/definitions/uint8
++ description: High-Speed transmit amplitude
++ minimum: 0
++ maximum: 15
++ default: 8
++
++ qcom,tune-usb2-preem:
++ $ref: /schemas/types.yaml#/definitions/uint8
++ description: High-Speed TX pre-emphasis tuning
++ minimum: 0
++ maximum: 7
++ default: 5
++
+ required:
+ - compatible
+ - reg
+diff --git a/Documentation/devicetree/bindings/serial/serial.yaml b/Documentation/devicetree/bindings/serial/serial.yaml
+index ea277560a5966..5727bd549deca 100644
+--- a/Documentation/devicetree/bindings/serial/serial.yaml
++++ b/Documentation/devicetree/bindings/serial/serial.yaml
+@@ -96,7 +96,7 @@ then:
+ rts-gpios: false
+
+ patternProperties:
+- "^bluetooth|gnss|gps|mcu$":
++ "^(bluetooth|gnss|gps|mcu)$":
+ if:
+ type: object
+ then:
+diff --git a/Documentation/devicetree/bindings/timer/renesas,rz-mtu3.yaml b/Documentation/devicetree/bindings/timer/renesas,rz-mtu3.yaml
+index bffdab0b01859..fbac40b958dde 100644
+--- a/Documentation/devicetree/bindings/timer/renesas,rz-mtu3.yaml
++++ b/Documentation/devicetree/bindings/timer/renesas,rz-mtu3.yaml
+@@ -169,27 +169,27 @@ properties:
+ - const: tgib0
+ - const: tgic0
+ - const: tgid0
+- - const: tgiv0
++ - const: tciv0
+ - const: tgie0
+ - const: tgif0
+ - const: tgia1
+ - const: tgib1
+- - const: tgiv1
+- - const: tgiu1
++ - const: tciv1
++ - const: tciu1
+ - const: tgia2
+ - const: tgib2
+- - const: tgiv2
+- - const: tgiu2
++ - const: tciv2
++ - const: tciu2
+ - const: tgia3
+ - const: tgib3
+ - const: tgic3
+ - const: tgid3
+- - const: tgiv3
++ - const: tciv3
+ - const: tgia4
+ - const: tgib4
+ - const: tgic4
+ - const: tgid4
+- - const: tgiv4
++ - const: tciv4
+ - const: tgiu5
+ - const: tgiv5
+ - const: tgiw5
+@@ -197,18 +197,18 @@ properties:
+ - const: tgib6
+ - const: tgic6
+ - const: tgid6
+- - const: tgiv6
++ - const: tciv6
+ - const: tgia7
+ - const: tgib7
+ - const: tgic7
+ - const: tgid7
+- - const: tgiv7
++ - const: tciv7
+ - const: tgia8
+ - const: tgib8
+ - const: tgic8
+ - const: tgid8
+- - const: tgiv8
+- - const: tgiu8
++ - const: tciv8
++ - const: tciu8
+
+ clocks:
+ maxItems: 1
+@@ -285,16 +285,16 @@ examples:
+ <GIC_SPI 211 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 212 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 213 IRQ_TYPE_EDGE_RISING>;
+- interrupt-names = "tgia0", "tgib0", "tgic0", "tgid0", "tgiv0", "tgie0",
++ interrupt-names = "tgia0", "tgib0", "tgic0", "tgid0", "tciv0", "tgie0",
+ "tgif0",
+- "tgia1", "tgib1", "tgiv1", "tgiu1",
+- "tgia2", "tgib2", "tgiv2", "tgiu2",
+- "tgia3", "tgib3", "tgic3", "tgid3", "tgiv3",
+- "tgia4", "tgib4", "tgic4", "tgid4", "tgiv4",
++ "tgia1", "tgib1", "tciv1", "tciu1",
++ "tgia2", "tgib2", "tciv2", "tciu2",
++ "tgia3", "tgib3", "tgic3", "tgid3", "tciv3",
++ "tgia4", "tgib4", "tgic4", "tgid4", "tciv4",
+ "tgiu5", "tgiv5", "tgiw5",
+- "tgia6", "tgib6", "tgic6", "tgid6", "tgiv6",
+- "tgia7", "tgib7", "tgic7", "tgid7", "tgiv7",
+- "tgia8", "tgib8", "tgic8", "tgid8", "tgiv8", "tgiu8";
++ "tgia6", "tgib6", "tgic6", "tgid6", "tciv6",
++ "tgia7", "tgib7", "tgic7", "tgid7", "tciv7",
++ "tgia8", "tgib8", "tgic8", "tgid8", "tciv8", "tciu8";
+ clocks = <&cpg CPG_MOD R9A07G044_MTU_X_MCK_MTU3>;
+ power-domains = <&cpg>;
+ resets = <&cpg R9A07G044_MTU_X_PRESET_MTU3>;
+diff --git a/Documentation/i2c/busses/i2c-i801.rst b/Documentation/i2c/busses/i2c-i801.rst
+index e76e68ccf7182..10eced6c2e462 100644
+--- a/Documentation/i2c/busses/i2c-i801.rst
++++ b/Documentation/i2c/busses/i2c-i801.rst
+@@ -47,6 +47,7 @@ Supported adapters:
+ * Intel Alder Lake (PCH)
+ * Intel Raptor Lake (PCH)
+ * Intel Meteor Lake (SOC and PCH)
++ * Intel Birch Stream (SOC)
+
+ Datasheets: Publicly available at the Intel website
+
+diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst
+index a395df9c27513..008e560e12b58 100644
+--- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst
++++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst
+@@ -683,6 +683,12 @@ the software port.
+ time protocol.
+ - Error
+
++ * - `ptp_cq[i]_late_cqe`
++ - Number of times a CQE has been delivered on the PTP timestamping CQ when
++ the CQE was not expected since a certain amount of time had elapsed where
++ the device typically ensures not posting the CQE.
++ - Error
++
+ .. [#ring_global] The corresponding ring and global counters do not share the
+ same name (i.e. do not follow the common naming scheme).
+
+diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst
+deleted file mode 100644
+index a4edf908b707c..0000000000000
+--- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst
++++ /dev/null
+@@ -1,313 +0,0 @@
+-.. SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+-.. include:: <isonum.txt>
+-
+-=======
+-Devlink
+-=======
+-
+-:Copyright: |copy| 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+-
+-Contents
+-========
+-
+-- `Info`_
+-- `Parameters`_
+-- `Health reporters`_
+-
+-Info
+-====
+-
+-The devlink info reports the running and stored firmware versions on device.
+-It also prints the device PSID which represents the HCA board type ID.
+-
+-User command example::
+-
+- $ devlink dev info pci/0000:00:06.0
+- pci/0000:00:06.0:
+- driver mlx5_core
+- versions:
+- fixed:
+- fw.psid MT_0000000009
+- running:
+- fw.version 16.26.0100
+- stored:
+- fw.version 16.26.0100
+-
+-Parameters
+-==========
+-
+-flow_steering_mode: Device flow steering mode
+----------------------------------------------
+-The flow steering mode parameter controls the flow steering mode of the driver.
+-Two modes are supported:
+-
+-1. 'dmfs' - Device managed flow steering.
+-2. 'smfs' - Software/Driver managed flow steering.
+-
+-In DMFS mode, the HW steering entities are created and managed through the
+-Firmware.
+-In SMFS mode, the HW steering entities are created and managed though by
+-the driver directly into hardware without firmware intervention.
+-
+-SMFS mode is faster and provides better rule insertion rate compared to default DMFS mode.
+-
+-User command examples:
+-
+-- Set SMFS flow steering mode::
+-
+- $ devlink dev param set pci/0000:06:00.0 name flow_steering_mode value "smfs" cmode runtime
+-
+-- Read device flow steering mode::
+-
+- $ devlink dev param show pci/0000:06:00.0 name flow_steering_mode
+- pci/0000:06:00.0:
+- name flow_steering_mode type driver-specific
+- values:
+- cmode runtime value smfs
+-
+-enable_roce: RoCE enablement state
+-----------------------------------
+-If the device supports RoCE disablement, RoCE enablement state controls device
+-support for RoCE capability. Otherwise, the control occurs in the driver stack.
+-When RoCE is disabled at the driver level, only raw ethernet QPs are supported.
+-
+-To change RoCE enablement state, a user must change the driverinit cmode value
+-and run devlink reload.
+-
+-User command examples:
+-
+-- Disable RoCE::
+-
+- $ devlink dev param set pci/0000:06:00.0 name enable_roce value false cmode driverinit
+- $ devlink dev reload pci/0000:06:00.0
+-
+-- Read RoCE enablement state::
+-
+- $ devlink dev param show pci/0000:06:00.0 name enable_roce
+- pci/0000:06:00.0:
+- name enable_roce type generic
+- values:
+- cmode driverinit value true
+-
+-esw_port_metadata: Eswitch port metadata state
+-----------------------------------------------
+-When applicable, disabling eswitch metadata can increase packet rate
+-up to 20% depending on the use case and packet sizes.
+-
+-Eswitch port metadata state controls whether to internally tag packets with
+-metadata. Metadata tagging must be enabled for multi-port RoCE, failover
+-between representors and stacked devices.
+-By default metadata is enabled on the supported devices in E-switch.
+-Metadata is applicable only for E-switch in switchdev mode and
+-users may disable it when NONE of the below use cases will be in use:
+-
+-1. HCA is in Dual/multi-port RoCE mode.
+-2. VF/SF representor bonding (Usually used for Live migration)
+-3. Stacked devices
+-
+-When metadata is disabled, the above use cases will fail to initialize if
+-users try to enable them.
+-
+-- Show eswitch port metadata::
+-
+- $ devlink dev param show pci/0000:06:00.0 name esw_port_metadata
+- pci/0000:06:00.0:
+- name esw_port_metadata type driver-specific
+- values:
+- cmode runtime value true
+-
+-- Disable eswitch port metadata::
+-
+- $ devlink dev param set pci/0000:06:00.0 name esw_port_metadata value false cmode runtime
+-
+-- Change eswitch mode to switchdev mode where after choosing the metadata value::
+-
+- $ devlink dev eswitch set pci/0000:06:00.0 mode switchdev
+-
+-hairpin_num_queues: Number of hairpin queues
+---------------------------------------------
+-We refer to a TC NIC rule that involves forwarding as "hairpin".
+-
+-Hairpin queues are mlx5 hardware specific implementation for hardware
+-forwarding of such packets.
+-
+-- Show the number of hairpin queues::
+-
+- $ devlink dev param show pci/0000:06:00.0 name hairpin_num_queues
+- pci/0000:06:00.0:
+- name hairpin_num_queues type driver-specific
+- values:
+- cmode driverinit value 2
+-
+-- Change the number of hairpin queues::
+-
+- $ devlink dev param set pci/0000:06:00.0 name hairpin_num_queues value 4 cmode driverinit
+-
+-hairpin_queue_size: Size of the hairpin queues
+-----------------------------------------------
+-Control the size of the hairpin queues.
+-
+-- Show the size of the hairpin queues::
+-
+- $ devlink dev param show pci/0000:06:00.0 name hairpin_queue_size
+- pci/0000:06:00.0:
+- name hairpin_queue_size type driver-specific
+- values:
+- cmode driverinit value 1024
+-
+-- Change the size (in packets) of the hairpin queues::
+-
+- $ devlink dev param set pci/0000:06:00.0 name hairpin_queue_size value 512 cmode driverinit
+-
+-Health reporters
+-================
+-
+-tx reporter
+------------
+-The tx reporter is responsible for reporting and recovering of the following two error scenarios:
+-
+-- tx timeout
+- Report on kernel tx timeout detection.
+- Recover by searching lost interrupts.
+-- tx error completion
+- Report on error tx completion.
+- Recover by flushing the tx queue and reset it.
+-
+-tx reporter also support on demand diagnose callback, on which it provides
+-real time information of its send queues status.
+-
+-User commands examples:
+-
+-- Diagnose send queues status::
+-
+- $ devlink health diagnose pci/0000:82:00.0 reporter tx
+-
+-.. note::
+- This command has valid output only when interface is up, otherwise the command has empty output.
+-
+-- Show number of tx errors indicated, number of recover flows ended successfully,
+- is autorecover enabled and graceful period from last recover::
+-
+- $ devlink health show pci/0000:82:00.0 reporter tx
+-
+-rx reporter
+------------
+-The rx reporter is responsible for reporting and recovering of the following two error scenarios:
+-
+-- rx queues' initialization (population) timeout
+- Population of rx queues' descriptors on ring initialization is done
+- in napi context via triggering an irq. In case of a failure to get
+- the minimum amount of descriptors, a timeout would occur, and
+- descriptors could be recovered by polling the EQ (Event Queue).
+-- rx completions with errors (reported by HW on interrupt context)
+- Report on rx completion error.
+- Recover (if needed) by flushing the related queue and reset it.
+-
+-rx reporter also supports on demand diagnose callback, on which it
+-provides real time information of its receive queues' status.
+-
+-- Diagnose rx queues' status and corresponding completion queue::
+-
+- $ devlink health diagnose pci/0000:82:00.0 reporter rx
+-
+-NOTE: This command has valid output only when interface is up. Otherwise, the command has empty output.
+-
+-- Show number of rx errors indicated, number of recover flows ended successfully,
+- is autorecover enabled, and graceful period from last recover::
+-
+- $ devlink health show pci/0000:82:00.0 reporter rx
+-
+-fw reporter
+------------
+-The fw reporter implements `diagnose` and `dump` callbacks.
+-It follows symptoms of fw error such as fw syndrome by triggering
+-fw core dump and storing it into the dump buffer.
+-The fw reporter diagnose command can be triggered any time by the user to check
+-current fw status.
+-
+-User commands examples:
+-
+-- Check fw heath status::
+-
+- $ devlink health diagnose pci/0000:82:00.0 reporter fw
+-
+-- Read FW core dump if already stored or trigger new one::
+-
+- $ devlink health dump show pci/0000:82:00.0 reporter fw
+-
+-.. note::
+- This command can run only on the PF which has fw tracer ownership,
+- running it on other PF or any VF will return "Operation not permitted".
+-
+-fw fatal reporter
+------------------
+-The fw fatal reporter implements `dump` and `recover` callbacks.
+-It follows fatal errors indications by CR-space dump and recover flow.
+-The CR-space dump uses vsc interface which is valid even if the FW command
+-interface is not functional, which is the case in most FW fatal errors.
+-The recover function runs recover flow which reloads the driver and triggers fw
+-reset if needed.
+-On firmware error, the health buffer is dumped into the dmesg. The log
+-level is derived from the error's severity (given in health buffer).
+-
+-User commands examples:
+-
+-- Run fw recover flow manually::
+-
+- $ devlink health recover pci/0000:82:00.0 reporter fw_fatal
+-
+-- Read FW CR-space dump if already stored or trigger new one::
+-
+- $ devlink health dump show pci/0000:82:00.1 reporter fw_fatal
+-
+-.. note::
+- This command can run only on PF.
+-
+-vnic reporter
+--------------
+-The vnic reporter implements only the `diagnose` callback.
+-It is responsible for querying the vnic diagnostic counters from fw and displaying
+-them in realtime.
+-
+-Description of the vnic counters:
+-
+-- total_q_under_processor_handle
+- number of queues in an error state due to
+- an async error or errored command.
+-- send_queue_priority_update_flow
+- number of QP/SQ priority/SL update events.
+-- cq_overrun
+- number of times CQ entered an error state due to an overflow.
+-- async_eq_overrun
+- number of times an EQ mapped to async events was overrun.
+- comp_eq_overrun number of times an EQ mapped to completion events was
+- overrun.
+-- quota_exceeded_command
+- number of commands issued and failed due to quota exceeded.
+-- invalid_command
+- number of commands issued and failed dues to any reason other than quota
+- exceeded.
+-- nic_receive_steering_discard
+- number of packets that completed RX flow
+- steering but were discarded due to a mismatch in flow table.
+-- generated_pkt_steering_fail
+- number of packets generated by the VNIC experiencing unexpected steering
+- failure (at any point in steering flow).
+-- handled_pkt_steering_fail
+- number of packets handled by the VNIC experiencing unexpected steering
+- failure (at any point in steering flow owned by the VNIC, including the FDB
+- for the eswitch owner).
+-
+-User commands examples:
+-
+-- Diagnose PF/VF vnic counters::
+-
+- $ devlink health diagnose pci/0000:82:00.1 reporter vnic
+-
+-- Diagnose representor vnic counters (performed by supplying devlink port of the
+- representor, which can be obtained via devlink port command)::
+-
+- $ devlink health diagnose pci/0000:82:00.1/65537 reporter vnic
+-
+-.. note::
+- This command can run over all interfaces such as PF/VF and representor ports.
+diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/index.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/index.rst
+index 3fdcd6b61ccfa..581a91caa5795 100644
+--- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/index.rst
++++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/index.rst
+@@ -13,7 +13,6 @@ Contents:
+ :maxdepth: 2
+
+ kconfig
+- devlink
+ switchdev
+ tracepoints
+ counters
+diff --git a/Documentation/networking/devlink/mlx5.rst b/Documentation/networking/devlink/mlx5.rst
+index 202798d6501e7..702f204a3dbd3 100644
+--- a/Documentation/networking/devlink/mlx5.rst
++++ b/Documentation/networking/devlink/mlx5.rst
+@@ -18,6 +18,11 @@ Parameters
+ * - ``enable_roce``
+ - driverinit
+ - Type: Boolean
++
++ If the device supports RoCE disablement, RoCE enablement state controls
++ device support for RoCE capability. Otherwise, the control occurs in the
++ driver stack. When RoCE is disabled at the driver level, only raw
++ ethernet QPs are supported.
+ * - ``io_eq_size``
+ - driverinit
+ - The range is between 64 and 4096.
+@@ -48,6 +53,9 @@ parameters.
+ * ``smfs`` Software managed flow steering. In SMFS mode, the HW
+ steering entities are created and manage through the driver without
+ firmware intervention.
++
++ SMFS mode is faster and provides better rule insertion rate compared to
++ default DMFS mode.
+ * - ``fdb_large_groups``
+ - u32
+ - driverinit
+@@ -71,7 +79,24 @@ parameters.
+ deprecated.
+
+ Default: disabled
++ * - ``esw_port_metadata``
++ - Boolean
++ - runtime
++ - When applicable, disabling eswitch metadata can increase packet rate up
++ to 20% depending on the use case and packet sizes.
++
++ Eswitch port metadata state controls whether to internally tag packets
++ with metadata. Metadata tagging must be enabled for multi-port RoCE,
++ failover between representors and stacked devices. By default metadata is
++ enabled on the supported devices in E-switch. Metadata is applicable only
++ for E-switch in switchdev mode and users may disable it when NONE of the
++ below use cases will be in use:
++ 1. HCA is in Dual/multi-port RoCE mode.
++ 2. VF/SF representor bonding (Usually used for Live migration)
++ 3. Stacked devices
+
++ When metadata is disabled, the above use cases will fail to initialize if
++ users try to enable them.
+ * - ``hairpin_num_queues``
+ - u32
+ - driverinit
+@@ -104,3 +129,160 @@ The ``mlx5`` driver reports the following versions
+ * - ``fw.version``
+ - stored, running
+ - Three digit major.minor.subminor firmware version number.
++
++Health reporters
++================
++
++tx reporter
++-----------
++The tx reporter is responsible for reporting and recovering of the following three error scenarios:
++
++- tx timeout
++ Report on kernel tx timeout detection.
++ Recover by searching lost interrupts.
++- tx error completion
++ Report on error tx completion.
++ Recover by flushing the tx queue and reset it.
++- tx PTP port timestamping CQ unhealthy
++ Report too many CQEs never delivered on port ts CQ.
++ Recover by flushing and re-creating all PTP channels.
++
++tx reporter also supports an on-demand diagnose callback, on which it provides
++real time information of its send queues status.
++
++User commands examples:
++
++- Diagnose send queues status::
++
++ $ devlink health diagnose pci/0000:82:00.0 reporter tx
++
++.. note::
++ This command has valid output only when interface is up, otherwise the command has empty output.
++
++- Show number of tx errors indicated, number of recover flows ended successfully,
++ is autorecover enabled and graceful period from last recover::
++
++ $ devlink health show pci/0000:82:00.0 reporter tx
++
++rx reporter
++-----------
++The rx reporter is responsible for reporting and recovering of the following two error scenarios:
++
++- rx queues' initialization (population) timeout
++ Population of rx queues' descriptors on ring initialization is done
++ in napi context via triggering an irq. In case of a failure to get
++ the minimum amount of descriptors, a timeout would occur, and
++ descriptors could be recovered by polling the EQ (Event Queue).
++- rx completions with errors (reported by HW on interrupt context)
++ Report on rx completion error.
++ Recover (if needed) by flushing the related queue and reset it.
++
++rx reporter also supports on demand diagnose callback, on which it
++provides real time information of its receive queues' status.
++
++- Diagnose rx queues' status and corresponding completion queue::
++
++ $ devlink health diagnose pci/0000:82:00.0 reporter rx
++
++.. note::
++ This command has valid output only when interface is up. Otherwise, the command has empty output.
++
++- Show number of rx errors indicated, number of recover flows ended successfully,
++ is autorecover enabled, and graceful period from last recover::
++
++ $ devlink health show pci/0000:82:00.0 reporter rx
++
++fw reporter
++-----------
++The fw reporter implements `diagnose` and `dump` callbacks.
++It follows symptoms of fw error such as fw syndrome by triggering
++fw core dump and storing it into the dump buffer.
++The fw reporter diagnose command can be triggered any time by the user to check
++current fw status.
++
++User commands examples:
++
++- Check fw health status::
++
++ $ devlink health diagnose pci/0000:82:00.0 reporter fw
++
++- Read FW core dump if already stored or trigger new one::
++
++ $ devlink health dump show pci/0000:82:00.0 reporter fw
++
++.. note::
++ This command can run only on the PF which has fw tracer ownership,
++ running it on other PF or any VF will return "Operation not permitted".
++
++fw fatal reporter
++-----------------
++The fw fatal reporter implements `dump` and `recover` callbacks.
++It follows fatal errors indications by CR-space dump and recover flow.
++The CR-space dump uses vsc interface which is valid even if the FW command
++interface is not functional, which is the case in most FW fatal errors.
++The recover function runs recover flow which reloads the driver and triggers fw
++reset if needed.
++On firmware error, the health buffer is dumped into the dmesg. The log
++level is derived from the error's severity (given in health buffer).
++
++User commands examples:
++
++- Run fw recover flow manually::
++
++ $ devlink health recover pci/0000:82:00.0 reporter fw_fatal
++
++- Read FW CR-space dump if already stored or trigger new one::
++
++ $ devlink health dump show pci/0000:82:00.1 reporter fw_fatal
++
++.. note::
++ This command can run only on PF.
++
++vnic reporter
++-------------
++The vnic reporter implements only the `diagnose` callback.
++It is responsible for querying the vnic diagnostic counters from fw and displaying
++them in realtime.
++
++Description of the vnic counters:
++
++- total_q_under_processor_handle
++ number of queues in an error state due to
++ an async error or errored command.
++- send_queue_priority_update_flow
++ number of QP/SQ priority/SL update events.
++- cq_overrun
++ number of times CQ entered an error state due to an overflow.
++- async_eq_overrun
++ number of times an EQ mapped to async events was overrun.
++- comp_eq_overrun
++ number of times an EQ mapped to completion events was overrun.
++- quota_exceeded_command
++ number of commands issued and failed due to quota exceeded.
++- invalid_command
++ number of commands issued and failed due to any reason other than quota
++ exceeded.
++- nic_receive_steering_discard
++ number of packets that completed RX flow
++ steering but were discarded due to a mismatch in flow table.
++- generated_pkt_steering_fail
++ number of packets generated by the VNIC experiencing unexpected steering
++ failure (at any point in steering flow).
++- handled_pkt_steering_fail
++ number of packets handled by the VNIC experiencing unexpected steering
++ failure (at any point in steering flow owned by the VNIC, including the FDB
++ for the eswitch owner).
++
++User commands examples:
++
++- Diagnose PF/VF vnic counters::
++
++ $ devlink health diagnose pci/0000:82:00.1 reporter vnic
++
++- Diagnose representor vnic counters (performed by supplying devlink port of the
++ representor, which can be obtained via devlink port command)::
++
++ $ devlink health diagnose pci/0000:82:00.1/65537 reporter vnic
++
++.. note::
++ This command can run over all interfaces such as PF/VF and representor ports.
+diff --git a/Makefile b/Makefile
+index a6e152146028a..80e2b991dd0f3 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 5
+-SUBLEVEL = 12
++SUBLEVEL = 13
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+
+diff --git a/arch/arm/include/asm/exception.h b/arch/arm/include/asm/exception.h
+index 58e039a851af0..3c82975d46db3 100644
+--- a/arch/arm/include/asm/exception.h
++++ b/arch/arm/include/asm/exception.h
+@@ -10,10 +10,6 @@
+
+ #include <linux/interrupt.h>
+
+-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ #define __exception_irq_entry __irq_entry
+-#else
+-#define __exception_irq_entry
+-#endif
+
+ #endif /* __ASM_ARM_EXCEPTION_H */
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 73085b30b3092..9e0a2453b87d6 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -1369,6 +1369,8 @@ choice
+ config CPU_BIG_ENDIAN
+ bool "Build big-endian kernel"
+ depends on !LD_IS_LLD || LLD_VERSION >= 130000
++ # https://github.com/llvm/llvm-project/commit/1379b150991f70a5782e9a143c2ba5308da1161c
++ depends on AS_IS_GNU || AS_VERSION >= 150000
+ help
+ Say Y if you plan on running a kernel with a big-endian userspace.
+
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
+index d2f5345d05600..717288bbdb8b6 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
+@@ -1186,26 +1186,34 @@
+ dma-coherent;
+ };
+
+- usb0: usb@3100000 {
+- status = "disabled";
+- compatible = "snps,dwc3";
+- reg = <0x0 0x3100000 0x0 0x10000>;
+- interrupts = <0 80 0x4>; /* Level high type */
+- dr_mode = "host";
+- snps,quirk-frame-length-adjustment = <0x20>;
+- snps,dis_rxdet_inp3_quirk;
+- snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
+- };
++ bus: bus {
++ #address-cells = <2>;
++ #size-cells = <2>;
++ compatible = "simple-bus";
++ ranges;
++ dma-ranges = <0x0 0x0 0x0 0x0 0x100 0x00000000>;
++
++ usb0: usb@3100000 {
++ compatible = "snps,dwc3";
++ reg = <0x0 0x3100000 0x0 0x10000>;
++ interrupts = <0 80 0x4>; /* Level high type */
++ dr_mode = "host";
++ snps,quirk-frame-length-adjustment = <0x20>;
++ snps,dis_rxdet_inp3_quirk;
++ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++ status = "disabled";
++ };
+
+- usb1: usb@3110000 {
+- status = "disabled";
+- compatible = "snps,dwc3";
+- reg = <0x0 0x3110000 0x0 0x10000>;
+- interrupts = <0 81 0x4>; /* Level high type */
+- dr_mode = "host";
+- snps,quirk-frame-length-adjustment = <0x20>;
+- snps,dis_rxdet_inp3_quirk;
+- snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++ usb1: usb@3110000 {
++ compatible = "snps,dwc3";
++ reg = <0x0 0x3110000 0x0 0x10000>;
++ interrupts = <0 81 0x4>; /* Level high type */
++ dr_mode = "host";
++ snps,quirk-frame-length-adjustment = <0x20>;
++ snps,dis_rxdet_inp3_quirk;
++ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++ status = "disabled";
++ };
+ };
+
+ ccn@4000000 {
+diff --git a/arch/arm64/boot/dts/qcom/ipq5332.dtsi b/arch/arm64/boot/dts/qcom/ipq5332.dtsi
+index 8bfc2db44624a..e40c55adff23d 100644
+--- a/arch/arm64/boot/dts/qcom/ipq5332.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq5332.dtsi
+@@ -135,7 +135,7 @@
+ reg = <0x0 0x4a800000 0x0 0x100000>;
+ no-map;
+
+- hwlocks = <&tcsr_mutex 0>;
++ hwlocks = <&tcsr_mutex 3>;
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+index 7355f266742aa..cdd7690132734 100644
+--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+@@ -207,7 +207,7 @@
+ smem {
+ compatible = "qcom,smem";
+ memory-region = <&smem_region>;
+- hwlocks = <&tcsr_mutex 0>;
++ hwlocks = <&tcsr_mutex 3>;
+ };
+
+ soc: soc@0 {
+@@ -389,7 +389,7 @@
+
+ tcsr_mutex: hwlock@1905000 {
+ compatible = "qcom,ipq6018-tcsr-mutex", "qcom,tcsr-mutex";
+- reg = <0x0 0x01905000 0x0 0x1000>;
++ reg = <0x0 0x01905000 0x0 0x20000>;
+ #hwlock-cells = <1>;
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+index 00ed71936b472..92fd924bbdbe5 100644
+--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+@@ -101,7 +101,7 @@
+ reg = <0x0 0x4ab00000 0x0 0x100000>;
+ no-map;
+
+- hwlocks = <&tcsr_mutex 0>;
++ hwlocks = <&tcsr_mutex 3>;
+ };
+
+ memory@4ac00000 {
+diff --git a/arch/arm64/boot/dts/qcom/ipq9574.dtsi b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+index f120c7c523517..cbe07fd44788d 100644
+--- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+@@ -174,7 +174,7 @@
+ smem@4aa00000 {
+ compatible = "qcom,smem";
+ reg = <0x0 0x4aa00000 0x0 0x100000>;
+- hwlocks = <&tcsr_mutex 0>;
++ hwlocks = <&tcsr_mutex 3>;
+ no-map;
+ };
+ };
+diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
+index bd69a4e7cd605..79200f21e1239 100644
+--- a/arch/arm64/kernel/module-plts.c
++++ b/arch/arm64/kernel/module-plts.c
+@@ -167,9 +167,6 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
+ switch (ELF64_R_TYPE(rela[i].r_info)) {
+ case R_AARCH64_JUMP26:
+ case R_AARCH64_CALL26:
+- if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+- break;
+-
+ /*
+ * We only have to consider branch targets that resolve
+ * to symbols that are defined in a different section.
+@@ -269,9 +266,6 @@ static int partition_branch_plt_relas(Elf64_Sym *syms, Elf64_Rela *rela,
+ {
+ int i = 0, j = numrels - 1;
+
+- if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+- return 0;
+-
+ while (i < j) {
+ if (branch_rela_needs_plt(syms, &rela[i], dstidx))
+ i++;
+diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h
+index b9f567e660166..ed5da02b1cf6f 100644
+--- a/arch/loongarch/include/asm/percpu.h
++++ b/arch/loongarch/include/asm/percpu.h
+@@ -32,7 +32,7 @@ static inline void set_my_cpu_offset(unsigned long off)
+ #define __my_cpu_offset __my_cpu_offset
+
+ #define PERCPU_OP(op, asm_op, c_op) \
+-static inline unsigned long __percpu_##op(void *ptr, \
++static __always_inline unsigned long __percpu_##op(void *ptr, \
+ unsigned long val, int size) \
+ { \
+ unsigned long ret; \
+@@ -63,7 +63,7 @@ PERCPU_OP(and, and, &)
+ PERCPU_OP(or, or, |)
+ #undef PERCPU_OP
+
+-static inline unsigned long __percpu_read(void *ptr, int size)
++static __always_inline unsigned long __percpu_read(void *ptr, int size)
+ {
+ unsigned long ret;
+
+@@ -100,7 +100,7 @@ static inline unsigned long __percpu_read(void *ptr, int size)
+ return ret;
+ }
+
+-static inline void __percpu_write(void *ptr, unsigned long val, int size)
++static __always_inline void __percpu_write(void *ptr, unsigned long val, int size)
+ {
+ switch (size) {
+ case 1:
+@@ -132,8 +132,8 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
+ }
+ }
+
+-static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
+- int size)
++static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
++ int size)
+ {
+ switch (size) {
+ case 1:
+diff --git a/arch/parisc/include/uapi/asm/pdc.h b/arch/parisc/include/uapi/asm/pdc.h
+index 7a90070136e82..8e38a86996fc6 100644
+--- a/arch/parisc/include/uapi/asm/pdc.h
++++ b/arch/parisc/include/uapi/asm/pdc.h
+@@ -472,6 +472,7 @@ struct pdc_model { /* for PDC_MODEL */
+ unsigned long arch_rev;
+ unsigned long pot_key;
+ unsigned long curr_key;
++ unsigned long width; /* default of PSW_W bit (1=enabled) */
+ };
+
+ struct pdc_cache_cf { /* for PDC_CACHE (I/D-caches) */
+diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
+index ae03b8679696e..ab23e61a6f016 100644
+--- a/arch/parisc/kernel/entry.S
++++ b/arch/parisc/kernel/entry.S
+@@ -36,6 +36,24 @@
+ .level 2.0
+ #endif
+
++/*
++ * We need seven instructions after a TLB insert for it to take effect.
++ * The PA8800/PA8900 processors are an exception and need 12 instructions.
++ * The RFI changes both IAOQ_Back and IAOQ_Front, so it counts as one.
++ */
++#ifdef CONFIG_64BIT
++#define NUM_PIPELINE_INSNS 12
++#else
++#define NUM_PIPELINE_INSNS 7
++#endif
++
++ /* Insert num nops */
++ .macro insert_nops num
++ .rept \num
++ nop
++ .endr
++ .endm
++
+ /* Get aligned page_table_lock address for this mm from cr28/tr4 */
+ .macro get_ptl reg
+ mfctl %cr28,\reg
+@@ -415,24 +433,20 @@
+ 3:
+ .endm
+
+- /* Release page_table_lock without reloading lock address.
+- We use an ordered store to ensure all prior accesses are
+- performed prior to releasing the lock. */
+- .macro ptl_unlock0 spc,tmp,tmp2
++ /* Release page_table_lock if for user space. We use an ordered
++ store to ensure all prior accesses are performed prior to
++ releasing the lock. Note stw may not be executed, so we
++ provide one extra nop when CONFIG_TLB_PTLOCK is defined. */
++ .macro ptl_unlock spc,tmp,tmp2
+ #ifdef CONFIG_TLB_PTLOCK
+-98: ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmp2
++98: get_ptl \tmp
++ ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmp2
+ or,COND(=) %r0,\spc,%r0
+ stw,ma \tmp2,0(\tmp)
+ 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
+-#endif
+- .endm
+-
+- /* Release page_table_lock. */
+- .macro ptl_unlock1 spc,tmp,tmp2
+-#ifdef CONFIG_TLB_PTLOCK
+-98: get_ptl \tmp
+- ptl_unlock0 \spc,\tmp,\tmp2
+-99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
++ insert_nops NUM_PIPELINE_INSNS - 4
++#else
++ insert_nops NUM_PIPELINE_INSNS - 1
+ #endif
+ .endm
+
+@@ -461,13 +475,13 @@
+ * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
+ #define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
+ #define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12)
++ #define PFN_START_BIT (63-ASM_PFN_PTE_SHIFT+(63-58)-PAGE_ADD_SHIFT)
+
+ /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
+ .macro convert_for_tlb_insert20 pte,tmp
+ #ifdef CONFIG_HUGETLB_PAGE
+ copy \pte,\tmp
+- extrd,u \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
+- 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
++ extrd,u \tmp,PFN_START_BIT,PFN_START_BIT+1,\pte
+
+ depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
+ (63-58)+PAGE_ADD_SHIFT,\pte
+@@ -475,8 +489,7 @@
+ depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
+ (63-58)+PAGE_ADD_HUGE_SHIFT,\pte
+ #else /* Huge pages disabled */
+- extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
+- 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
++ extrd,u \pte,PFN_START_BIT,PFN_START_BIT+1,\pte
+ depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
+ (63-58)+PAGE_ADD_SHIFT,\pte
+ #endif
+@@ -1124,7 +1137,7 @@ dtlb_miss_20w:
+
+ idtlbt pte,prot
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1133,6 +1146,7 @@ dtlb_check_alias_20w:
+
+ idtlbt pte,prot
+
++ insert_nops NUM_PIPELINE_INSNS - 1
+ rfir
+ nop
+
+@@ -1150,7 +1164,7 @@ nadtlb_miss_20w:
+
+ idtlbt pte,prot
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1159,6 +1173,7 @@ nadtlb_check_alias_20w:
+
+ idtlbt pte,prot
+
++ insert_nops NUM_PIPELINE_INSNS - 1
+ rfir
+ nop
+
+@@ -1184,7 +1199,7 @@ dtlb_miss_11:
+
+ mtsp t1, %sr1 /* Restore sr1 */
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1194,6 +1209,7 @@ dtlb_check_alias_11:
+ idtlba pte,(va)
+ idtlbp prot,(va)
+
++ insert_nops NUM_PIPELINE_INSNS - 1
+ rfir
+ nop
+
+@@ -1217,7 +1233,7 @@ nadtlb_miss_11:
+
+ mtsp t1, %sr1 /* Restore sr1 */
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1227,6 +1243,7 @@ nadtlb_check_alias_11:
+ idtlba pte,(va)
+ idtlbp prot,(va)
+
++ insert_nops NUM_PIPELINE_INSNS - 1
+ rfir
+ nop
+
+@@ -1246,7 +1263,7 @@ dtlb_miss_20:
+
+ idtlbt pte,prot
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1255,6 +1272,7 @@ dtlb_check_alias_20:
+
+ idtlbt pte,prot
+
++ insert_nops NUM_PIPELINE_INSNS - 1
+ rfir
+ nop
+
+@@ -1274,7 +1292,7 @@ nadtlb_miss_20:
+
+ idtlbt pte,prot
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1283,6 +1301,7 @@ nadtlb_check_alias_20:
+
+ idtlbt pte,prot
+
++ insert_nops NUM_PIPELINE_INSNS - 1
+ rfir
+ nop
+
+@@ -1319,7 +1338,7 @@ itlb_miss_20w:
+
+ iitlbt pte,prot
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1343,7 +1362,7 @@ naitlb_miss_20w:
+
+ iitlbt pte,prot
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1352,6 +1371,7 @@ naitlb_check_alias_20w:
+
+ iitlbt pte,prot
+
++ insert_nops NUM_PIPELINE_INSNS - 1
+ rfir
+ nop
+
+@@ -1377,7 +1397,7 @@ itlb_miss_11:
+
+ mtsp t1, %sr1 /* Restore sr1 */
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1401,7 +1421,7 @@ naitlb_miss_11:
+
+ mtsp t1, %sr1 /* Restore sr1 */
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1411,6 +1431,7 @@ naitlb_check_alias_11:
+ iitlba pte,(%sr0, va)
+ iitlbp prot,(%sr0, va)
+
++ insert_nops NUM_PIPELINE_INSNS - 1
+ rfir
+ nop
+
+@@ -1431,7 +1452,7 @@ itlb_miss_20:
+
+ iitlbt pte,prot
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1451,7 +1472,7 @@ naitlb_miss_20:
+
+ iitlbt pte,prot
+
+- ptl_unlock1 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1460,6 +1481,7 @@ naitlb_check_alias_20:
+
+ iitlbt pte,prot
+
++ insert_nops NUM_PIPELINE_INSNS - 1
+ rfir
+ nop
+
+@@ -1481,7 +1503,7 @@ dbit_trap_20w:
+
+ idtlbt pte,prot
+
+- ptl_unlock0 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+ #else
+@@ -1507,7 +1529,7 @@ dbit_trap_11:
+
+ mtsp t1, %sr1 /* Restore sr1 */
+
+- ptl_unlock0 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+
+@@ -1527,7 +1549,7 @@ dbit_trap_20:
+
+ idtlbt pte,prot
+
+- ptl_unlock0 spc,t0,t1
++ ptl_unlock spc,t0,t1
+ rfir
+ nop
+ #endif
+diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
+index fd15fd4bbb61b..5a7d43c0f469c 100644
+--- a/arch/parisc/kernel/head.S
++++ b/arch/parisc/kernel/head.S
+@@ -70,9 +70,8 @@ $bss_loop:
+ stw,ma %arg2,4(%r1)
+ stw,ma %arg3,4(%r1)
+
+-#if !defined(CONFIG_64BIT) && defined(CONFIG_PA20)
+- /* This 32-bit kernel was compiled for PA2.0 CPUs. Check current CPU
+- * and halt kernel if we detect a PA1.x CPU. */
++#if defined(CONFIG_PA20)
++ /* check for 64-bit capable CPU as required by current kernel */
+ ldi 32,%r10
+ mtctl %r10,%cr11
+ .level 2.0
+diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
+index 8c1f7def596e4..10b946e9c6e75 100644
+--- a/arch/powerpc/perf/core-book3s.c
++++ b/arch/powerpc/perf/core-book3s.c
+@@ -1371,8 +1371,7 @@ static void power_pmu_disable(struct pmu *pmu)
+ /*
+ * Disable instruction sampling if it was enabled
+ */
+- if (cpuhw->mmcr.mmcra & MMCRA_SAMPLE_ENABLE)
+- val &= ~MMCRA_SAMPLE_ENABLE;
++ val &= ~MMCRA_SAMPLE_ENABLE;
+
+ /* Disable BHRB via mmcra (BHRBRD) for p10 */
+ if (ppmu->flags & PPMU_ARCH_31)
+@@ -1383,7 +1382,7 @@ static void power_pmu_disable(struct pmu *pmu)
+ * instruction sampling or BHRB.
+ */
+ if (val != mmcra) {
+- mtspr(SPRN_MMCRA, mmcra);
++ mtspr(SPRN_MMCRA, val);
+ mb();
+ isync();
+ }
+diff --git a/arch/powerpc/platforms/powernv/opal-prd.c b/arch/powerpc/platforms/powernv/opal-prd.c
+index 113bdb151f687..40e26e9f318fd 100644
+--- a/arch/powerpc/platforms/powernv/opal-prd.c
++++ b/arch/powerpc/platforms/powernv/opal-prd.c
+@@ -24,13 +24,20 @@
+ #include <linux/uaccess.h>
+
+
++struct opal_prd_msg {
++ union {
++ struct opal_prd_msg_header header;
++ DECLARE_FLEX_ARRAY(u8, data);
++ };
++};
++
+ /*
+ * The msg member must be at the end of the struct, as it's followed by the
+ * message data.
+ */
+ struct opal_prd_msg_queue_item {
+- struct list_head list;
+- struct opal_prd_msg_header msg;
++ struct list_head list;
++ struct opal_prd_msg msg;
+ };
+
+ static struct device_node *prd_node;
+@@ -156,7 +163,7 @@ static ssize_t opal_prd_read(struct file *file, char __user *buf,
+ int rc;
+
+ /* we need at least a header's worth of data */
+- if (count < sizeof(item->msg))
++ if (count < sizeof(item->msg.header))
+ return -EINVAL;
+
+ if (*ppos)
+@@ -186,7 +193,7 @@ static ssize_t opal_prd_read(struct file *file, char __user *buf,
+ return -EINTR;
+ }
+
+- size = be16_to_cpu(item->msg.size);
++ size = be16_to_cpu(item->msg.header.size);
+ if (size > count) {
+ err = -EINVAL;
+ goto err_requeue;
+@@ -352,7 +359,7 @@ static int opal_prd_msg_notifier(struct notifier_block *nb,
+ if (!item)
+ return -ENOMEM;
+
+- memcpy(&item->msg, msg->params, msg_size);
++ memcpy(&item->msg.data, msg->params, msg_size);
+
+ spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
+ list_add_tail(&item->list, &opal_prd_msg_queue);
+diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h
+index 61ba8ed43d8fe..36b955c762ba0 100644
+--- a/arch/riscv/include/asm/asm-prototypes.h
++++ b/arch/riscv/include/asm/asm-prototypes.h
+@@ -25,7 +25,6 @@ DECLARE_DO_ERROR_INFO(do_trap_ecall_s);
+ DECLARE_DO_ERROR_INFO(do_trap_ecall_m);
+ DECLARE_DO_ERROR_INFO(do_trap_break);
+
+-asmlinkage unsigned long get_overflow_stack(void);
+ asmlinkage void handle_bad_stack(struct pt_regs *regs);
+ asmlinkage void do_page_fault(struct pt_regs *regs);
+ asmlinkage void do_irq(struct pt_regs *regs);
+diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
+index 114bbadaef41e..bfb4c26f113c4 100644
+--- a/arch/riscv/include/asm/asm.h
++++ b/arch/riscv/include/asm/asm.h
+@@ -82,6 +82,28 @@
+ .endr
+ .endm
+
++#ifdef CONFIG_SMP
++#ifdef CONFIG_32BIT
++#define PER_CPU_OFFSET_SHIFT 2
++#else
++#define PER_CPU_OFFSET_SHIFT 3
++#endif
++
++.macro asm_per_cpu dst sym tmp
++ REG_L \tmp, TASK_TI_CPU_NUM(tp)
++ slli \tmp, \tmp, PER_CPU_OFFSET_SHIFT
++ la \dst, __per_cpu_offset
++ add \dst, \dst, \tmp
++ REG_L \tmp, 0(\dst)
++ la \dst, \sym
++ add \dst, \dst, \tmp
++.endm
++#else /* CONFIG_SMP */
++.macro asm_per_cpu dst sym tmp
++ la \dst, \sym
++.endm
++#endif /* CONFIG_SMP */
++
+ /* save all GPs except x1 ~ x5 */
+ .macro save_from_x6_to_x31
+ REG_S x6, PT_T1(sp)
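As a rough C sketch, not part of the patch, with names taken from the macro above: asm_per_cpu resolves a per-CPU symbol's address by indexing __per_cpu_offset with the CPU number cached in thread_info and adding that offset to the symbol's link-time address. PER_CPU_OFFSET_SHIFT (2 on 32-bit, 3 on 64-bit) is just the scale for indexing an array of longs, which C's subscript applies implicitly:

    /* Hedged C equivalent of the asm_per_cpu assembler macro above. */
    extern unsigned long __per_cpu_offset[];   /* per-CPU base offsets */

    static unsigned long asm_per_cpu_equiv(unsigned long sym_addr, int cpu)
    {
            return sym_addr + __per_cpu_offset[cpu];
    }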
+diff --git a/arch/riscv/include/asm/hwprobe.h b/arch/riscv/include/asm/hwprobe.h
+index 78936f4ff5133..7cad513538d8d 100644
+--- a/arch/riscv/include/asm/hwprobe.h
++++ b/arch/riscv/include/asm/hwprobe.h
+@@ -10,4 +10,9 @@
+
+ #define RISCV_HWPROBE_MAX_KEY 5
+
++static inline bool riscv_hwprobe_key_is_valid(__s64 key)
++{
++ return key >= 0 && key <= RISCV_HWPROBE_MAX_KEY;
++}
++
+ #endif
+diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
+index b55ba20903ecc..53c00164c0421 100644
+--- a/arch/riscv/include/asm/page.h
++++ b/arch/riscv/include/asm/page.h
+@@ -33,8 +33,8 @@
+ #define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
+ #endif
+ /*
+- * By default, CONFIG_PAGE_OFFSET value corresponds to SV48 address space so
+- * define the PAGE_OFFSET value for SV39.
++ * By default, CONFIG_PAGE_OFFSET value corresponds to SV57 address space so
++ * define the PAGE_OFFSET value for SV48 and SV39.
+ */
+ #define PAGE_OFFSET_L4 _AC(0xffffaf8000000000, UL)
+ #define PAGE_OFFSET_L3 _AC(0xffffffd800000000, UL)
+diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
+index 1833beb00489c..d18ce0113ca1f 100644
+--- a/arch/riscv/include/asm/thread_info.h
++++ b/arch/riscv/include/asm/thread_info.h
+@@ -34,9 +34,6 @@
+
+ #ifndef __ASSEMBLY__
+
+-extern long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE / sizeof(long)];
+-extern unsigned long spin_shadow_stack;
+-
+ #include <asm/processor.h>
+ #include <asm/csr.h>
+
+diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h
+index 14f5d27783b85..96b65a5396dfc 100644
+--- a/arch/riscv/include/asm/vdso/processor.h
++++ b/arch/riscv/include/asm/vdso/processor.h
+@@ -14,7 +14,7 @@ static inline void cpu_relax(void)
+ __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
+ #endif
+
+-#ifdef __riscv_zihintpause
++#ifdef CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE
+ /*
+ * Reduce instruction retirement.
+ * This assumes the PC changes.
+diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
+index d6a75aac1d27a..9f535d5de33f9 100644
+--- a/arch/riscv/kernel/asm-offsets.c
++++ b/arch/riscv/kernel/asm-offsets.c
+@@ -39,6 +39,7 @@ void asm_offsets(void)
+ OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
+ OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
+
++ OFFSET(TASK_TI_CPU_NUM, task_struct, thread_info.cpu);
+ OFFSET(TASK_THREAD_F0, task_struct, thread.fstate.f[0]);
+ OFFSET(TASK_THREAD_F1, task_struct, thread.fstate.f[1]);
+ OFFSET(TASK_THREAD_F2, task_struct, thread.fstate.f[2]);
+diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
+index 143a2bb3e6976..278d01d2911fd 100644
+--- a/arch/riscv/kernel/entry.S
++++ b/arch/riscv/kernel/entry.S
+@@ -10,9 +10,13 @@
+ #include <asm/asm.h>
+ #include <asm/csr.h>
+ #include <asm/unistd.h>
++#include <asm/page.h>
+ #include <asm/thread_info.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/errata_list.h>
++#include <linux/sizes.h>
++
++ .section .irqentry.text, "ax"
+
+ SYM_CODE_START(handle_exception)
+ /*
+@@ -170,67 +174,15 @@ SYM_CODE_END(ret_from_exception)
+
+ #ifdef CONFIG_VMAP_STACK
+ SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
+- /*
+- * Takes the psuedo-spinlock for the shadow stack, in case multiple
+- * harts are concurrently overflowing their kernel stacks. We could
+- * store any value here, but since we're overflowing the kernel stack
+- * already we only have SP to use as a scratch register. So we just
+- * swap in the address of the spinlock, as that's definately non-zero.
+- *
+- * Pairs with a store_release in handle_bad_stack().
+- */
+-1: la sp, spin_shadow_stack
+- REG_AMOSWAP_AQ sp, sp, (sp)
+- bnez sp, 1b
+-
+- la sp, shadow_stack
+- addi sp, sp, SHADOW_OVERFLOW_STACK_SIZE
+-
+- //save caller register to shadow stack
+- addi sp, sp, -(PT_SIZE_ON_STACK)
+- REG_S x1, PT_RA(sp)
+- REG_S x5, PT_T0(sp)
+- REG_S x6, PT_T1(sp)
+- REG_S x7, PT_T2(sp)
+- REG_S x10, PT_A0(sp)
+- REG_S x11, PT_A1(sp)
+- REG_S x12, PT_A2(sp)
+- REG_S x13, PT_A3(sp)
+- REG_S x14, PT_A4(sp)
+- REG_S x15, PT_A5(sp)
+- REG_S x16, PT_A6(sp)
+- REG_S x17, PT_A7(sp)
+- REG_S x28, PT_T3(sp)
+- REG_S x29, PT_T4(sp)
+- REG_S x30, PT_T5(sp)
+- REG_S x31, PT_T6(sp)
+-
+- la ra, restore_caller_reg
+- tail get_overflow_stack
+-
+-restore_caller_reg:
+- //save per-cpu overflow stack
+- REG_S a0, -8(sp)
+- //restore caller register from shadow_stack
+- REG_L x1, PT_RA(sp)
+- REG_L x5, PT_T0(sp)
+- REG_L x6, PT_T1(sp)
+- REG_L x7, PT_T2(sp)
+- REG_L x10, PT_A0(sp)
+- REG_L x11, PT_A1(sp)
+- REG_L x12, PT_A2(sp)
+- REG_L x13, PT_A3(sp)
+- REG_L x14, PT_A4(sp)
+- REG_L x15, PT_A5(sp)
+- REG_L x16, PT_A6(sp)
+- REG_L x17, PT_A7(sp)
+- REG_L x28, PT_T3(sp)
+- REG_L x29, PT_T4(sp)
+- REG_L x30, PT_T5(sp)
+- REG_L x31, PT_T6(sp)
++ /* we reach here from kernel context, sscratch must be 0 */
++ csrrw x31, CSR_SCRATCH, x31
++ asm_per_cpu sp, overflow_stack, x31
++ li x31, OVERFLOW_STACK_SIZE
++ add sp, sp, x31
++ /* zero out x31 again and restore x31 */
++ xor x31, x31, x31
++ csrrw x31, CSR_SCRATCH, x31
+
+- //load per-cpu overflow stack
+- REG_L sp, -8(sp)
+ addi sp, sp, -(PT_SIZE_ON_STACK)
+
+ //save context to overflow stack
+diff --git a/arch/riscv/kernel/probes/simulate-insn.c b/arch/riscv/kernel/probes/simulate-insn.c
+index 7441ac8a68436..8aaebe720e267 100644
+--- a/arch/riscv/kernel/probes/simulate-insn.c
++++ b/arch/riscv/kernel/probes/simulate-insn.c
+@@ -24,7 +24,7 @@ static inline bool rv_insn_reg_set_val(struct pt_regs *regs, u32 index,
+ unsigned long val)
+ {
+ if (index == 0)
+- return false;
++ return true;
+ else if (index <= 31)
+ *((unsigned long *)regs + index) = val;
+ else
+diff --git a/arch/riscv/kernel/probes/uprobes.c b/arch/riscv/kernel/probes/uprobes.c
+index 194f166b2cc40..4b3dc8beaf77d 100644
+--- a/arch/riscv/kernel/probes/uprobes.c
++++ b/arch/riscv/kernel/probes/uprobes.c
+@@ -3,6 +3,7 @@
+ #include <linux/highmem.h>
+ #include <linux/ptrace.h>
+ #include <linux/uprobes.h>
++#include <asm/insn.h>
+
+ #include "decode-insn.h"
+
+@@ -17,6 +18,11 @@ bool is_swbp_insn(uprobe_opcode_t *insn)
+ #endif
+ }
+
++bool is_trap_insn(uprobe_opcode_t *insn)
++{
++ return riscv_insn_is_ebreak(*insn) || riscv_insn_is_c_ebreak(*insn);
++}
++
+ unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
+ {
+ return instruction_pointer(regs);
+diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
+index cd6f10c73a163..061117b8a3438 100644
+--- a/arch/riscv/kernel/traps.c
++++ b/arch/riscv/kernel/traps.c
+@@ -408,48 +408,14 @@ int is_valid_bugaddr(unsigned long pc)
+ #endif /* CONFIG_GENERIC_BUG */
+
+ #ifdef CONFIG_VMAP_STACK
+-/*
+- * Extra stack space that allows us to provide panic messages when the kernel
+- * has overflowed its stack.
+- */
+-static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
++DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
+ overflow_stack)__aligned(16);
+-/*
+- * A temporary stack for use by handle_kernel_stack_overflow. This is used so
+- * we can call into C code to get the per-hart overflow stack. Usage of this
+- * stack must be protected by spin_shadow_stack.
+- */
+-long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)] __aligned(16);
+-
+-/*
+- * A pseudo spinlock to protect the shadow stack from being used by multiple
+- * harts concurrently. This isn't a real spinlock because the lock side must
+- * be taken without a valid stack and only a single register, it's only taken
+- * while in the process of panicing anyway so the performance and error
+- * checking a proper spinlock gives us doesn't matter.
+- */
+-unsigned long spin_shadow_stack;
+-
+-asmlinkage unsigned long get_overflow_stack(void)
+-{
+- return (unsigned long)this_cpu_ptr(overflow_stack) +
+- OVERFLOW_STACK_SIZE;
+-}
+
+ asmlinkage void handle_bad_stack(struct pt_regs *regs)
+ {
+ unsigned long tsk_stk = (unsigned long)current->stack;
+ unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
+
+- /*
+- * We're done with the shadow stack by this point, as we're on the
+- * overflow stack. Tell any other concurrent overflowing harts that
+- * they can proceed with panicing by releasing the pseudo-spinlock.
+- *
+- * This pairs with an amoswap.aq in handle_kernel_stack_overflow.
+- */
+- smp_store_release(&spin_shadow_stack, 0);
+-
+ console_verbose();
+
+ pr_emerg("Insufficient stack space to handle exception!\n");
+diff --git a/arch/riscv/kernel/vdso/hwprobe.c b/arch/riscv/kernel/vdso/hwprobe.c
+index d40bec6ac0786..cadf725ef7983 100644
+--- a/arch/riscv/kernel/vdso/hwprobe.c
++++ b/arch/riscv/kernel/vdso/hwprobe.c
+@@ -37,7 +37,7 @@ int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
+
+ /* This is something we can handle, fill out the pairs. */
+ while (p < end) {
+- if (p->key <= RISCV_HWPROBE_MAX_KEY) {
++ if (riscv_hwprobe_key_is_valid(p->key)) {
+ p->value = avd->all_cpu_hwprobe_values[p->key];
+
+ } else {
+diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c
+index 20a9f991a6d74..e9090b38f8117 100644
+--- a/arch/riscv/mm/ptdump.c
++++ b/arch/riscv/mm/ptdump.c
+@@ -384,6 +384,9 @@ static int __init ptdump_init(void)
+
+ kernel_ptd_info.base_addr = KERN_VIRT_START;
+
++ pg_level[1].name = pgtable_l5_enabled ? "P4D" : "PGD";
++ pg_level[2].name = pgtable_l4_enabled ? "PUD" : "PGD";
++
+ for (i = 0; i < ARRAY_SIZE(pg_level); i++)
+ for (j = 0; j < ARRAY_SIZE(pte_bits); j++)
+ pg_level[i].mask |= pte_bits[j].mask;
+diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
+index 1e2ea706aa228..00e7b0876dc50 100644
+--- a/arch/s390/mm/page-states.c
++++ b/arch/s390/mm/page-states.c
+@@ -121,7 +121,7 @@ static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
+ continue;
+ if (!pud_folded(*pud)) {
+ page = phys_to_page(pud_val(*pud));
+- for (i = 0; i < 3; i++)
++ for (i = 0; i < 4; i++)
+ set_bit(PG_arch_1, &page[i].flags);
+ }
+ mark_kernel_pmd(pud, addr, next);
+@@ -142,7 +142,7 @@ static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
+ continue;
+ if (!p4d_folded(*p4d)) {
+ page = phys_to_page(p4d_val(*p4d));
+- for (i = 0; i < 3; i++)
++ for (i = 0; i < 4; i++)
+ set_bit(PG_arch_1, &page[i].flags);
+ }
+ mark_kernel_pud(p4d, addr, next);
+@@ -164,7 +164,7 @@ static void mark_kernel_pgd(void)
+ continue;
+ if (!pgd_folded(*pgd)) {
+ page = phys_to_page(pgd_val(*pgd));
+- for (i = 0; i < 3; i++)
++ for (i = 0; i < 4; i++)
+ set_bit(PG_arch_1, &page[i].flags);
+ }
+ mark_kernel_p4d(pgd, addr, next);
+diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
+index 24a66670f5c3a..8e5a9f3e7404c 100644
+--- a/arch/s390/mm/vmem.c
++++ b/arch/s390/mm/vmem.c
+@@ -13,6 +13,7 @@
+ #include <linux/hugetlb.h>
+ #include <linux/slab.h>
+ #include <linux/sort.h>
++#include <asm/page-states.h>
+ #include <asm/cacheflush.h>
+ #include <asm/nospec-branch.h>
+ #include <asm/pgalloc.h>
+@@ -46,8 +47,11 @@ void *vmem_crst_alloc(unsigned long val)
+ unsigned long *table;
+
+ table = vmem_alloc_pages(CRST_ALLOC_ORDER);
+- if (table)
+- crst_table_init(table, val);
++ if (!table)
++ return NULL;
++ crst_table_init(table, val);
++ if (slab_is_available())
++ arch_set_page_dat(virt_to_page(table), CRST_ALLOC_ORDER);
+ return table;
+ }
+
+diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
+index 44340a1139e0b..959afa705e95c 100644
+--- a/arch/x86/crypto/sha1_ssse3_glue.c
++++ b/arch/x86/crypto/sha1_ssse3_glue.c
+@@ -24,8 +24,17 @@
+ #include <linux/types.h>
+ #include <crypto/sha1.h>
+ #include <crypto/sha1_base.h>
++#include <asm/cpu_device_id.h>
+ #include <asm/simd.h>
+
++static const struct x86_cpu_id module_cpu_ids[] = {
++ X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
++ X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
++ X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
++ {}
++};
++MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
++
+ static int sha1_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len, sha1_block_fn *sha1_xform)
+ {
+@@ -301,6 +310,9 @@ static inline void unregister_sha1_ni(void) { }
+
+ static int __init sha1_ssse3_mod_init(void)
+ {
++ if (!x86_match_cpu(module_cpu_ids))
++ return -ENODEV;
++
+ if (register_sha1_ssse3())
+ goto fail;
+
+diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
+index 3a5f6be7dbba4..d25235f0ccafc 100644
+--- a/arch/x86/crypto/sha256_ssse3_glue.c
++++ b/arch/x86/crypto/sha256_ssse3_glue.c
+@@ -38,11 +38,20 @@
+ #include <crypto/sha2.h>
+ #include <crypto/sha256_base.h>
+ #include <linux/string.h>
++#include <asm/cpu_device_id.h>
+ #include <asm/simd.h>
+
+ asmlinkage void sha256_transform_ssse3(struct sha256_state *state,
+ const u8 *data, int blocks);
+
++static const struct x86_cpu_id module_cpu_ids[] = {
++ X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
++ X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
++ X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
++ {}
++};
++MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
++
+ static int _sha256_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len, sha256_block_fn *sha256_xform)
+ {
+@@ -366,6 +375,9 @@ static inline void unregister_sha256_ni(void) { }
+
+ static int __init sha256_ssse3_mod_init(void)
+ {
++ if (!x86_match_cpu(module_cpu_ids))
++ return -ENODEV;
++
+ if (register_sha256_ssse3())
+ goto fail;
+
+diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
+index e3054e3e46d52..9b419f0de713c 100644
+--- a/arch/x86/include/asm/kvm-x86-ops.h
++++ b/arch/x86/include/asm/kvm-x86-ops.h
+@@ -108,6 +108,7 @@ KVM_X86_OP_OPTIONAL(vcpu_blocking)
+ KVM_X86_OP_OPTIONAL(vcpu_unblocking)
+ KVM_X86_OP_OPTIONAL(pi_update_irte)
+ KVM_X86_OP_OPTIONAL(pi_start_assignment)
++KVM_X86_OP_OPTIONAL(apicv_pre_state_restore)
+ KVM_X86_OP_OPTIONAL(apicv_post_state_restore)
+ KVM_X86_OP_OPTIONAL_RET0(dy_apicv_has_pending_interrupt)
+ KVM_X86_OP_OPTIONAL(set_hv_timer)
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index f72b30d2238a6..9bdbb1cc03d38 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1690,6 +1690,7 @@ struct kvm_x86_ops {
+ int (*pi_update_irte)(struct kvm *kvm, unsigned int host_irq,
+ uint32_t guest_irq, bool set);
+ void (*pi_start_assignment)(struct kvm *kvm);
++ void (*apicv_pre_state_restore)(struct kvm_vcpu *vcpu);
+ void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
+ bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
+
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index b37abb55e948b..389f9594746ef 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -553,6 +553,7 @@
+ #define MSR_AMD64_CPUID_FN_1 0xc0011004
+ #define MSR_AMD64_LS_CFG 0xc0011020
+ #define MSR_AMD64_DC_CFG 0xc0011022
++#define MSR_AMD64_TW_CFG 0xc0011023
+
+ #define MSR_AMD64_DE_CFG 0xc0011029
+ #define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1
+diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h
+index e3bae2b60a0db..ef2844d691735 100644
+--- a/arch/x86/include/asm/numa.h
++++ b/arch/x86/include/asm/numa.h
+@@ -12,13 +12,6 @@
+
+ #define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
+
+-/*
+- * Too small node sizes may confuse the VM badly. Usually they
+- * result from BIOS bugs. So dont recognize nodes as standalone
+- * NUMA entities that have less than this amount of RAM listed:
+- */
+-#define NODE_MIN_SIZE (4*1024*1024)
+-
+ extern int numa_off;
+
+ /*
+diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
+index 35d5b8fb18efe..81f2a9ebea6f7 100644
+--- a/arch/x86/kernel/apic/msi.c
++++ b/arch/x86/kernel/apic/msi.c
+@@ -55,14 +55,14 @@ msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force)
+ * caused by the non-atomic update of the address/data pair.
+ *
+ * Direct update is possible when:
+- * - The MSI is maskable (remapped MSI does not use this code path)).
+- * The quirk bit is not set in this case.
++ * - The MSI is maskable (remapped MSI does not use this code path).
++ * The reservation mode bit is set in this case.
+ * - The new vector is the same as the old vector
+ * - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up)
+ * - The interrupt is not yet started up
+ * - The new destination CPU is the same as the old destination CPU
+ */
+- if (!irqd_msi_nomask_quirk(irqd) ||
++ if (!irqd_can_reserve(irqd) ||
+ cfg->vector == old_cfg.vector ||
+ old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR ||
+ !irqd_is_started(irqd) ||
+@@ -215,8 +215,6 @@ static bool x86_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ if (WARN_ON_ONCE(domain != real_parent))
+ return false;
+ info->chip->irq_set_affinity = msi_set_affinity;
+- /* See msi_set_affinity() for the gory details */
+- info->flags |= MSI_FLAG_NOMASK_QUIRK;
+ break;
+ case DOMAIN_BUS_DMAR:
+ case DOMAIN_BUS_AMDVI:
+diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
+index 5a2962c492d31..046f88066c9b4 100644
+--- a/arch/x86/kernel/cpu/hygon.c
++++ b/arch/x86/kernel/cpu/hygon.c
+@@ -86,8 +86,12 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)
+ if (!err)
+ c->x86_coreid_bits = get_count_order(c->x86_max_cores);
+
+- /* Socket ID is ApicId[6] for these processors. */
+- c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
++ /*
++ * Socket ID is ApicId[6] for processors with model <= 0x3
++ * when running on bare metal (i.e. not under a hypervisor).
++ */
++ if (!boot_cpu_has(X86_FEATURE_HYPERVISOR) && c->x86_model <= 0x3)
++ c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
+
+ cacheinfo_hygon_init_llc_id(c, cpu);
+ } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
+diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
+index b28fd020066f6..b4990c851ade3 100644
+--- a/arch/x86/kvm/hyperv.c
++++ b/arch/x86/kvm/hyperv.c
+@@ -727,10 +727,12 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
+
+ stimer_cleanup(stimer);
+ stimer->count = count;
+- if (stimer->count == 0)
+- stimer->config.enable = 0;
+- else if (stimer->config.auto_enable)
+- stimer->config.enable = 1;
++ if (!host) {
++ if (stimer->count == 0)
++ stimer->config.enable = 0;
++ else if (stimer->config.auto_enable)
++ stimer->config.enable = 1;
++ }
+
+ if (stimer->config.enable)
+ stimer_mark_pending(stimer, false);
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index e74e223f46aa3..90ad9cb6bd808 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -2423,22 +2423,22 @@ EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
+ void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
+ {
+ struct kvm_lapic *apic = vcpu->arch.apic;
+- u64 val;
+
+ /*
+- * ICR is a single 64-bit register when x2APIC is enabled. For legacy
+- * xAPIC, ICR writes need to go down the common (slightly slower) path
+- * to get the upper half from ICR2.
++ * ICR is a single 64-bit register when x2APIC is enabled; all other
++ * registers hold 32-bit values. For legacy xAPIC, ICR writes need to
++ * go down the common path to get the upper half from ICR2.
++ *
++ * Note, using the write helpers may incur an unnecessary write to the
++ * virtual APIC state, but KVM needs to conditionally modify the value
++ * in certain cases, e.g. to clear the ICR busy bit. The cost of extra
++ * conditional branches is likely a wash relative to the cost of the
++ * maybe-unnecessary write, and both are in the noise anyway.
+ */
+- if (apic_x2apic_mode(apic) && offset == APIC_ICR) {
+- val = kvm_lapic_get_reg64(apic, APIC_ICR);
+- kvm_apic_send_ipi(apic, (u32)val, (u32)(val >> 32));
+- trace_kvm_apic_write(APIC_ICR, val);
+- } else {
+- /* TODO: optimize to just emulate side effect w/o one more write */
+- val = kvm_lapic_get_reg(apic, offset);
+- kvm_lapic_reg_write(apic, offset, (u32)val);
+- }
++ if (apic_x2apic_mode(apic) && offset == APIC_ICR)
++ kvm_x2apic_icr_write(apic, kvm_lapic_get_reg64(apic, APIC_ICR));
++ else
++ kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
+ }
+ EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
+
+@@ -2649,6 +2649,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
+ u64 msr_val;
+ int i;
+
++ static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);
++
+ if (!init_event) {
+ msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
+ if (kvm_vcpu_is_reset_bsp(vcpu))
+@@ -2960,6 +2962,8 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
+ struct kvm_lapic *apic = vcpu->arch.apic;
+ int r;
+
++ static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);
++
+ kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
+ /* set SPIV separately to get count of SW disabled APICs right */
+ apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index bc6f0fea48b43..52af279f793db 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -6909,7 +6909,7 @@ static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+ vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
+ }
+
+-static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
++static void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu)
+ {
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+@@ -8275,7 +8275,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
+ .set_apic_access_page_addr = vmx_set_apic_access_page_addr,
+ .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
+ .load_eoi_exitmap = vmx_load_eoi_exitmap,
+- .apicv_post_state_restore = vmx_apicv_post_state_restore,
++ .apicv_pre_state_restore = vmx_apicv_pre_state_restore,
+ .required_apicv_inhibits = VMX_REQUIRED_APICV_INHIBITS,
+ .hwapic_irr_update = vmx_hwapic_irr_update,
+ .hwapic_isr_update = vmx_hwapic_isr_update,
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 7bcf1a76a6abc..a5c8a01f7e7eb 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3643,6 +3643,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ case MSR_AMD64_PATCH_LOADER:
+ case MSR_AMD64_BU_CFG2:
+ case MSR_AMD64_DC_CFG:
++ case MSR_AMD64_TW_CFG:
+ case MSR_F15H_EX_CFG:
+ break;
+
+@@ -4067,6 +4068,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ case MSR_AMD64_BU_CFG2:
+ case MSR_IA32_PERF_CTL:
+ case MSR_AMD64_DC_CFG:
++ case MSR_AMD64_TW_CFG:
+ case MSR_F15H_EX_CFG:
+ /*
+ * Intel Sandy Bridge CPUs must support the RAPL (running average power
+diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
+index c01c5506fd4ae..aa39d678fe81d 100644
+--- a/arch/x86/mm/numa.c
++++ b/arch/x86/mm/numa.c
+@@ -602,13 +602,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
+ if (start >= end)
+ continue;
+
+- /*
+- * Don't confuse VM with a node that doesn't have the
+- * minimum amount of memory:
+- */
+- if (end && (end - start) < NODE_MIN_SIZE)
+- continue;
+-
+ alloc_node_data(nid);
+ }
+
+diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
+index e3ec02e6ac9fe..f347c20247d30 100644
+--- a/arch/x86/pci/fixup.c
++++ b/arch/x86/pci/fixup.c
+@@ -3,9 +3,11 @@
+ * Exceptions for specific devices. Usually work-arounds for fatal design flaws.
+ */
+
++#include <linux/bitfield.h>
+ #include <linux/delay.h>
+ #include <linux/dmi.h>
+ #include <linux/pci.h>
++#include <linux/suspend.h>
+ #include <linux/vgaarb.h>
+ #include <asm/amd_nb.h>
+ #include <asm/hpet.h>
+@@ -904,3 +906,60 @@ static void chromeos_fixup_apl_pci_l1ss_capability(struct pci_dev *dev)
+ }
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x5ad6, chromeos_save_apl_pci_l1ss_capability);
+ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x5ad6, chromeos_fixup_apl_pci_l1ss_capability);
++
++#ifdef CONFIG_SUSPEND
++/*
++ * Root Ports on some AMD SoCs advertise PME_Support for D3hot and D3cold, but
++ * if the SoC is put into a hardware sleep state by the amd-pmc driver, the
++ * Root Ports don't generate wakeup interrupts for USB devices.
++ *
++ * When suspending, remove D3hot and D3cold from the PME_Support advertised
++ * by the Root Port so we don't use those states if we're expecting wakeup
++ * interrupts. Restore the advertised PME_Support when resuming.
++ */
++static void amd_rp_pme_suspend(struct pci_dev *dev)
++{
++ struct pci_dev *rp;
++
++ /*
++ * PM_SUSPEND_ON means we're doing runtime suspend, which means
++ * amd-pmc will not be involved, so PMEs during D3 work as advertised.
++ *
++ * The PMEs *do* work if amd-pmc doesn't put the SoC in the hardware
++ * sleep state, but we assume amd-pmc is always present.
++ */
++ if (pm_suspend_target_state == PM_SUSPEND_ON)
++ return;
++
++ rp = pcie_find_root_port(dev);
++ if (!rp->pm_cap)
++ return;
++
++ rp->pme_support &= ~((PCI_PM_CAP_PME_D3hot|PCI_PM_CAP_PME_D3cold) >>
++ PCI_PM_CAP_PME_SHIFT);
++ dev_info_once(&rp->dev, "quirk: disabling D3cold for suspend\n");
++}
++
++static void amd_rp_pme_resume(struct pci_dev *dev)
++{
++ struct pci_dev *rp;
++ u16 pmc;
++
++ rp = pcie_find_root_port(dev);
++ if (!rp->pm_cap)
++ return;
++
++ pci_read_config_word(rp, rp->pm_cap + PCI_PM_PMC, &pmc);
++ rp->pme_support = FIELD_GET(PCI_PM_CAP_PME_MASK, pmc);
++}
++/* Rembrandt (yellow_carp) */
++DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x162e, amd_rp_pme_suspend);
++DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x162e, amd_rp_pme_resume);
++DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x162f, amd_rp_pme_suspend);
++DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x162f, amd_rp_pme_resume);
++/* Phoenix (pink_sardine) */
++DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x1668, amd_rp_pme_suspend);
++DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1668, amd_rp_pme_resume);
++DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x1669, amd_rp_pme_suspend);
++DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1669, amd_rp_pme_resume);
++#endif /* CONFIG_SUSPEND */
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index c21bc81a790ff..5fb31b9a16403 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2874,11 +2874,8 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
+ };
+ struct request *rq;
+
+- if (unlikely(bio_queue_enter(bio)))
+- return NULL;
+-
+ if (blk_mq_attempt_bio_merge(q, bio, nsegs))
+- goto queue_exit;
++ return NULL;
+
+ rq_qos_throttle(q, bio);
+
+@@ -2894,35 +2891,23 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
+ rq_qos_cleanup(q, bio);
+ if (bio->bi_opf & REQ_NOWAIT)
+ bio_wouldblock_error(bio);
+-queue_exit:
+- blk_queue_exit(q);
+ return NULL;
+ }
+
+-static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
+- struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
++/* return true if this @rq can be used for @bio */
++static bool blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug,
++ struct bio *bio)
+ {
+- struct request *rq;
+- enum hctx_type type, hctx_type;
++ enum hctx_type type = blk_mq_get_hctx_type(bio->bi_opf);
++ enum hctx_type hctx_type = rq->mq_hctx->type;
+
+- if (!plug)
+- return NULL;
+- rq = rq_list_peek(&plug->cached_rq);
+- if (!rq || rq->q != q)
+- return NULL;
++ WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
+
+- if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
+- *bio = NULL;
+- return NULL;
+- }
+-
+- type = blk_mq_get_hctx_type((*bio)->bi_opf);
+- hctx_type = rq->mq_hctx->type;
+ if (type != hctx_type &&
+ !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
+- return NULL;
+- if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
+- return NULL;
++ return false;
++ if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
++ return false;
+
+ /*
+ * If any qos ->throttle() end up blocking, we will have flushed the
+@@ -2930,12 +2915,12 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
+ * before we throttle.
+ */
+ plug->cached_rq = rq_list_next(rq);
+- rq_qos_throttle(q, *bio);
++ rq_qos_throttle(rq->q, bio);
+
+ blk_mq_rq_time_init(rq, 0);
+- rq->cmd_flags = (*bio)->bi_opf;
++ rq->cmd_flags = bio->bi_opf;
+ INIT_LIST_HEAD(&rq->queuelist);
+- return rq;
++ return true;
+ }
+
+ static void bio_set_ioprio(struct bio *bio)
+@@ -2965,7 +2950,7 @@ void blk_mq_submit_bio(struct bio *bio)
+ struct blk_plug *plug = blk_mq_plug(bio);
+ const int is_sync = op_is_sync(bio->bi_opf);
+ struct blk_mq_hw_ctx *hctx;
+- struct request *rq;
++ struct request *rq = NULL;
+ unsigned int nr_segs = 1;
+ blk_status_t ret;
+
+@@ -2976,20 +2961,36 @@ void blk_mq_submit_bio(struct bio *bio)
+ return;
+ }
+
+- if (!bio_integrity_prep(bio))
+- return;
+-
+ bio_set_ioprio(bio);
+
+- rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
+- if (!rq) {
+- if (!bio)
++ if (plug) {
++ rq = rq_list_peek(&plug->cached_rq);
++ if (rq && rq->q != q)
++ rq = NULL;
++ }
++ if (rq) {
++ if (!bio_integrity_prep(bio))
+ return;
+- rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
+- if (unlikely(!rq))
++ if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
+ return;
++ if (blk_mq_can_use_cached_rq(rq, plug, bio))
++ goto done;
++ percpu_ref_get(&q->q_usage_counter);
++ } else {
++ if (unlikely(bio_queue_enter(bio)))
++ return;
++ if (!bio_integrity_prep(bio))
++ goto fail;
++ }
++
++ rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
++ if (unlikely(!rq)) {
++fail:
++ blk_queue_exit(q);
++ return;
+ }
+
++done:
+ trace_block_getrq(bio);
+
+ rq_qos_track(q, rq, bio);
+diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
+index 8c1d0ca412137..d0d954fe9d54f 100644
+--- a/crypto/pcrypt.c
++++ b/crypto/pcrypt.c
+@@ -117,6 +117,8 @@ static int pcrypt_aead_encrypt(struct aead_request *req)
+ err = padata_do_parallel(ictx->psenc, padata, &ctx->cb_cpu);
+ if (!err)
+ return -EINPROGRESS;
++ if (err == -EBUSY)
++ return -EAGAIN;
+
+ return err;
+ }
+@@ -164,6 +166,8 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
+ err = padata_do_parallel(ictx->psdec, padata, &ctx->cb_cpu);
+ if (!err)
+ return -EINPROGRESS;
++ if (err == -EBUSY)
++ return -EAGAIN;
+
+ return err;
+ }
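The two pcrypt hunks above map padata's -EBUSY (parallel queue full) to
-EAGAIN, so callers of the AEAD API see ordinary transient backpressure
instead of a hard failure. A minimal caller-side sketch; the retry policy
here is purely illustrative and not part of this patch:

	/* Hedged sketch: resubmit an AEAD request while pcrypt reports
	 * transient backpressure. 0 or -EINPROGRESS mean the request
	 * was accepted; anything other than -EAGAIN is a real error.
	 */
	static int submit_with_retry(struct aead_request *req)
	{
		int err;

		do {
			err = crypto_aead_encrypt(req);
			if (err == -EAGAIN)
				cond_resched();	/* back off, then retry */
		} while (err == -EAGAIN);

		return err;
	}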
+diff --git a/drivers/acpi/acpi_fpdt.c b/drivers/acpi/acpi_fpdt.c
+index a2056c4c8cb70..271092f2700a1 100644
+--- a/drivers/acpi/acpi_fpdt.c
++++ b/drivers/acpi/acpi_fpdt.c
+@@ -194,12 +194,19 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
+ record_header = (void *)subtable_header + offset;
+ offset += record_header->length;
+
++ if (!record_header->length) {
++ pr_err(FW_BUG "Zero-length record found in FPDT.\n");
++ result = -EINVAL;
++ goto err;
++ }
++
+ switch (record_header->type) {
+ case RECORD_S3_RESUME:
+ if (subtable_type != SUBTABLE_S3PT) {
+ pr_err(FW_BUG "Invalid record %d for subtable %s\n",
+ record_header->type, signature);
+- return -EINVAL;
++ result = -EINVAL;
++ goto err;
+ }
+ if (record_resume) {
+ pr_err("Duplicate resume performance record found.\n");
+@@ -208,7 +215,7 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
+ record_resume = (struct resume_performance_record *)record_header;
+ result = sysfs_create_group(fpdt_kobj, &resume_attr_group);
+ if (result)
+- return result;
++ goto err;
+ break;
+ case RECORD_S3_SUSPEND:
+ if (subtable_type != SUBTABLE_S3PT) {
+@@ -223,13 +230,14 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
+ record_suspend = (struct suspend_performance_record *)record_header;
+ result = sysfs_create_group(fpdt_kobj, &suspend_attr_group);
+ if (result)
+- return result;
++ goto err;
+ break;
+ case RECORD_BOOT:
+ if (subtable_type != SUBTABLE_FBPT) {
+ pr_err(FW_BUG "Invalid %d for subtable %s\n",
+ record_header->type, signature);
+- return -EINVAL;
++ result = -EINVAL;
++ goto err;
+ }
+ if (record_boot) {
+ pr_err("Duplicate boot performance record found.\n");
+@@ -238,7 +246,7 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
+ record_boot = (struct boot_performance_record *)record_header;
+ result = sysfs_create_group(fpdt_kobj, &boot_attr_group);
+ if (result)
+- return result;
++ goto err;
+ break;
+
+ default:
+@@ -247,6 +255,18 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
+ }
+ }
+ return 0;
++
++err:
++ if (record_boot)
++ sysfs_remove_group(fpdt_kobj, &boot_attr_group);
++
++ if (record_suspend)
++ sysfs_remove_group(fpdt_kobj, &suspend_attr_group);
++
++ if (record_resume)
++ sysfs_remove_group(fpdt_kobj, &resume_attr_group);
++
++ return result;
+ }
+
+ static int __init acpi_init_fpdt(void)
+@@ -255,6 +275,7 @@ static int __init acpi_init_fpdt(void)
+ struct acpi_table_header *header;
+ struct fpdt_subtable_entry *subtable;
+ u32 offset = sizeof(*header);
++ int result;
+
+ status = acpi_get_table(ACPI_SIG_FPDT, 0, &header);
+
+@@ -263,8 +284,8 @@ static int __init acpi_init_fpdt(void)
+
+ fpdt_kobj = kobject_create_and_add("fpdt", acpi_kobj);
+ if (!fpdt_kobj) {
+- acpi_put_table(header);
+- return -ENOMEM;
++ result = -ENOMEM;
++ goto err_nomem;
+ }
+
+ while (offset < header->length) {
+@@ -272,8 +293,10 @@ static int __init acpi_init_fpdt(void)
+ switch (subtable->type) {
+ case SUBTABLE_FBPT:
+ case SUBTABLE_S3PT:
+- fpdt_process_subtable(subtable->address,
++ result = fpdt_process_subtable(subtable->address,
+ subtable->type);
++ if (result)
++ goto err_subtable;
+ break;
+ default:
+ /* Other types are reserved in ACPI 6.4 spec. */
+@@ -282,6 +305,12 @@ static int __init acpi_init_fpdt(void)
+ offset += sizeof(*subtable);
+ }
+ return 0;
++err_subtable:
++ kobject_put(fpdt_kobj);
++
++err_nomem:
++ acpi_put_table(header);
++ return result;
+ }
+
+ fs_initcall(acpi_init_fpdt);
+diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
+index ef59d6ea16da0..63ad0541db381 100644
+--- a/drivers/acpi/apei/ghes.c
++++ b/drivers/acpi/apei/ghes.c
+@@ -209,6 +209,20 @@ err_pool_alloc:
+ return -ENOMEM;
+ }
+
++/**
++ * ghes_estatus_pool_region_free - free memory back to the ghes_estatus_pool
++ * @addr: address of the memory to free
++ * @size: size of the memory to free
++ */
++void ghes_estatus_pool_region_free(unsigned long addr, u32 size)
++{
++ gen_pool_free(ghes_estatus_pool, addr, size);
++}
++EXPORT_SYMBOL_GPL(ghes_estatus_pool_region_free);
++
+ static int map_gen_v2(struct ghes *ghes)
+ {
+ return apei_map_generic_address(&ghes->generic_v2->read_ack_register);
+@@ -564,6 +578,7 @@ static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
+ pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
+ unsigned int devfn;
+ int aer_severity;
++ u8 *aer_info;
+
+ devfn = PCI_DEVFN(pcie_err->device_id.device,
+ pcie_err->device_id.function);
+@@ -577,11 +592,17 @@ static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
+ if (gdata->flags & CPER_SEC_RESET)
+ aer_severity = AER_FATAL;
+
++ aer_info = (void *)gen_pool_alloc(ghes_estatus_pool,
++ sizeof(struct aer_capability_regs));
++ if (!aer_info)
++ return;
++ memcpy(aer_info, pcie_err->aer_info, sizeof(struct aer_capability_regs));
++
+ aer_recover_queue(pcie_err->device_id.segment,
+ pcie_err->device_id.bus,
+ devfn, aer_severity,
+ (struct aer_capability_regs *)
+- pcie_err->aer_info);
++ aer_info);
+ }
+ #endif
+ }
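The ghes hunks above fix a lifetime bug: aer_recover_queue() was handed a
pointer into the CPER record, which can be recycled before the recovery work
runs. The AER registers are now copied into the estatus pool, and
ghes_estatus_pool_region_free() is exported so the consumer can return the
copy. A hedged sketch of that consumer side, which is not part of this hunk
(the function name below is an assumption):

	/* Hedged sketch: once the recovery work has consumed the queued
	 * AER registers, the pool region allocated in ghes_handle_aer()
	 * must be handed back.
	 */
	static void aer_recover_done(struct aer_capability_regs *regs)
	{
		ghes_estatus_pool_region_free((unsigned long)regs,
					      sizeof(*regs));
	}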
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index c95d0edb0be9e..a59c11df73754 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -1924,6 +1924,16 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-dk1xxx"),
+ },
+ },
++ {
++ /*
++ * HP 250 G7 Notebook PC
++ */
++ .callback = ec_honor_dsdt_gpe,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP 250 G7 Notebook PC"),
++ },
++ },
+ {
+ /*
+ * Samsung hardware
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index 297a88587031e..80fbd385e8b4f 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -495,6 +495,18 @@ static const struct dmi_system_id maingear_laptop[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-15A3070T"),
+ }
+ },
++ {
++ /* TongFang GMxXGxx/TUXEDO Polaris 15 Gen5 AMD */
++ .matches = {
++ DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"),
++ },
++ },
++ {
++ /* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */
++ .matches = {
++ DMI_MATCH(DMI_BOARD_NAME, "GM6XGxX"),
++ },
++ },
+ {
+ .ident = "MAINGEAR Vector Pro 2 17",
+ .matches = {
+diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
+index 3241486869530..9bba8f280a4d4 100644
+--- a/drivers/atm/iphase.c
++++ b/drivers/atm/iphase.c
+@@ -2291,19 +2291,21 @@ static int get_esi(struct atm_dev *dev)
+ static int reset_sar(struct atm_dev *dev)
+ {
+ IADEV *iadev;
+- int i, error = 1;
++ int i, error;
+ unsigned int pci[64];
+
+ iadev = INPH_IA_DEV(dev);
+- for(i=0; i<64; i++)
+- if ((error = pci_read_config_dword(iadev->pci,
+- i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)
+- return error;
++ for (i = 0; i < 64; i++) {
++ error = pci_read_config_dword(iadev->pci, i * 4, &pci[i]);
++ if (error != PCIBIOS_SUCCESSFUL)
++ return error;
++ }
+ writel(0, iadev->reg+IPHASE5575_EXT_RESET);
+- for(i=0; i<64; i++)
+- if ((error = pci_write_config_dword(iadev->pci,
+- i*4, pci[i])) != PCIBIOS_SUCCESSFUL)
+- return error;
++ for (i = 0; i < 64; i++) {
++ error = pci_write_config_dword(iadev->pci, i * 4, pci[i]);
++ if (error != PCIBIOS_SUCCESSFUL)
++ return error;
++ }
+ udelay(5);
+ return 0;
+ }
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index a528cec24264a..0c3725c3eefa4 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -1274,8 +1274,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
+ if (dev->bus && dev->bus->dma_cleanup)
+ dev->bus->dma_cleanup(dev);
+
+- device_links_driver_cleanup(dev);
+ device_unbind_cleanup(dev);
++ device_links_driver_cleanup(dev);
+
+ klist_remove(&dev->p->knode_driver);
+ device_pm_check_callbacks(dev);
+diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
+index 7d3e47436056e..d525f99830664 100644
+--- a/drivers/base/regmap/regcache.c
++++ b/drivers/base/regmap/regcache.c
+@@ -334,6 +334,11 @@ static int regcache_default_sync(struct regmap *map, unsigned int min,
+ return 0;
+ }
+
++static int rbtree_all(const void *key, const struct rb_node *node)
++{
++ return 0;
++}
++
+ /**
+ * regcache_sync - Sync the register cache with the hardware.
+ *
+@@ -351,6 +356,7 @@ int regcache_sync(struct regmap *map)
+ unsigned int i;
+ const char *name;
+ bool bypass;
++ struct rb_node *node;
+
+ if (WARN_ON(map->cache_type == REGCACHE_NONE))
+ return -EINVAL;
+@@ -392,6 +398,30 @@ out:
+ /* Restore the bypass state */
+ map->cache_bypass = bypass;
+ map->no_sync_defaults = false;
++
++ /*
++ * If we did any paging with the cache bypassed and a paging
++ * register is cached, the register and cache state might have
++ * gone out of sync, so force writes of all the paging
++ * registers.
++ */
++ rb_for_each(node, 0, &map->range_tree, rbtree_all) {
++ struct regmap_range_node *this =
++ rb_entry(node, struct regmap_range_node, node);
++
++ /* If there's nothing in the cache there's nothing to sync */
++ ret = regcache_read(map, this->selector_reg, &i);
++ if (ret != 0)
++ continue;
++
++ ret = _regmap_write(map, this->selector_reg, i);
++ if (ret != 0) {
++ dev_err(map->dev, "Failed to write %x = %x: %d\n",
++ this->selector_reg, i, ret);
++ break;
++ }
++ }
++
+ map->unlock(map->lock_arg);
+
+ regmap_async_complete(map);
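The regcache hunk above walks every register range (rb_for_each() with a
match-all callback) and rewrites each range's page-selector register from its
cached value, because selector writes issued with the cache bypassed can
leave the hardware page out of step with the cache. The same resync idea in
isolation, as a hedged sketch where read_cached() and write_hw() are invented
stand-ins for the regmap internals:

	/* Hedged sketch: after syncing a paged register map, force each
	 * page-select register back to its cached value so the hardware
	 * page and the cache agree again.
	 */
	for (i = 0; i < num_ranges; i++) {
		unsigned int val;

		if (read_cached(map, ranges[i].sel_reg, &val))
			continue;	/* nothing cached, nothing to resync */

		if (write_hw(map, ranges[i].sel_reg, val))
			break;		/* stop and report the write error */
	}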
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index 1fe011676d070..4a4b9bad551e8 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -1313,6 +1313,7 @@ static int virtblk_probe(struct virtio_device *vdev)
+ u16 min_io_size;
+ u8 physical_block_exp, alignment_offset;
+ unsigned int queue_depth;
++ size_t max_dma_size;
+
+ if (!vdev->config->get) {
+ dev_err(&vdev->dev, "%s failure: config access disabled\n",
+@@ -1411,7 +1412,8 @@ static int virtblk_probe(struct virtio_device *vdev)
+ /* No real sector limit. */
+ blk_queue_max_hw_sectors(q, UINT_MAX);
+
+- max_size = virtio_max_dma_size(vdev);
++ max_dma_size = virtio_max_dma_size(vdev);
++ max_size = max_dma_size > U32_MAX ? U32_MAX : max_dma_size;
+
+ /* Host can optionally specify maximum segment size and number of
+ * segments. */
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index ca9e2a210fff2..ea29469fe0cff 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -2803,6 +2803,9 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
+ goto err_free_wc;
+ }
+
++ if (data->evt_skb == NULL)
++ goto err_free_wc;
++
+ /* Parse and handle the return WMT event */
+ wmt_evt = (struct btmtk_hci_wmt_evt *)data->evt_skb->data;
+ if (wmt_evt->whdr.op != hdr->op) {
+diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
+index c6f181702b9a7..edbc4d3381177 100644
+--- a/drivers/char/agp/parisc-agp.c
++++ b/drivers/char/agp/parisc-agp.c
+@@ -38,7 +38,7 @@ static struct _parisc_agp_info {
+
+ int lba_cap_offset;
+
+- u64 *gatt;
++ __le64 *gatt;
+ u64 gatt_entries;
+
+ u64 gart_base;
+@@ -104,7 +104,7 @@ parisc_agp_create_gatt_table(struct agp_bridge_data *bridge)
+ int i;
+
+ for (i = 0; i < info->gatt_entries; i++) {
+- info->gatt[i] = (unsigned long)agp_bridge->scratch_page;
++ info->gatt[i] = cpu_to_le64(agp_bridge->scratch_page);
+ }
+
+ return 0;
+@@ -158,9 +158,9 @@ parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
+ for (k = 0;
+ k < info->io_pages_per_kpage;
+ k++, j++, paddr += info->io_page_size) {
+- info->gatt[j] =
++ info->gatt[j] = cpu_to_le64(
+ parisc_agp_mask_memory(agp_bridge,
+- paddr, type);
++ paddr, type));
+ asm_io_fdc(&info->gatt[j]);
+ }
+ }
+@@ -184,7 +184,7 @@ parisc_agp_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
+ io_pg_start = info->io_pages_per_kpage * pg_start;
+ io_pg_count = info->io_pages_per_kpage * mem->page_count;
+ for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
+- info->gatt[i] = agp_bridge->scratch_page;
++ info->gatt[i] = cpu_to_le64(agp_bridge->scratch_page);
+ }
+
+ agp_bridge->driver->tlb_flush(mem);
+@@ -204,7 +204,8 @@ parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
+ pa |= (ci >> PAGE_SHIFT) & 0xff;/* move CI (8 bits) into lowest byte */
+ pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
+
+- return cpu_to_le64(pa);
++ /* return native (big-endian) PDIR entry */
++ return pa;
+ }
+
+ static void
+@@ -251,7 +252,8 @@ static int __init
+ agp_ioc_init(void __iomem *ioc_regs)
+ {
+ struct _parisc_agp_info *info = &parisc_agp_info;
+- u64 iova_base, *io_pdir, io_tlb_ps;
++ u64 iova_base, io_tlb_ps;
++ __le64 *io_pdir;
+ int io_tlb_shift;
+
+ printk(KERN_INFO DRVPFX "IO PDIR shared with sba_iommu\n");
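The parisc AGP hunks above make the GATT's storage format explicit: the table
is little-endian as consumed by the SBA IOMMU, so parisc_agp_mask_memory()
now returns a native big-endian value and every store into the table performs
the cpu_to_le64() conversion. The resulting convention as a tiny hedged
sketch (gatt_store() is illustrative; SBA_PDIR_VALID_BIT comes from the
driver):

	/* Hedged sketch: GATT/PDIR entries live in memory little-endian
	 * even on big-endian parisc, so conversion happens at the store.
	 */
	static void gatt_store(__le64 *gatt, int idx, u64 pa)
	{
		pa |= SBA_PDIR_VALID_BIT;	/* mark the entry valid */
		gatt[idx] = cpu_to_le64(pa);	/* table is always LE */
	}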
+diff --git a/drivers/clk/qcom/gcc-ipq6018.c b/drivers/clk/qcom/gcc-ipq6018.c
+index 86b43175b0422..d05c589d52bf2 100644
+--- a/drivers/clk/qcom/gcc-ipq6018.c
++++ b/drivers/clk/qcom/gcc-ipq6018.c
+@@ -73,7 +73,6 @@ static struct clk_fixed_factor gpll0_out_main_div2 = {
+ &gpll0_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -87,7 +86,6 @@ static struct clk_alpha_pll_postdiv gpll0 = {
+ &gpll0_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -162,7 +160,6 @@ static struct clk_alpha_pll_postdiv gpll6 = {
+ &gpll6_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -193,7 +190,6 @@ static struct clk_alpha_pll_postdiv gpll4 = {
+ &gpll4_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -244,7 +240,6 @@ static struct clk_alpha_pll_postdiv gpll2 = {
+ &gpll2_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -275,7 +270,6 @@ static struct clk_alpha_pll_postdiv nss_crypto_pll = {
+ &nss_crypto_pll_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
+index 6541d98c03483..6ed4eba00731a 100644
+--- a/drivers/clk/qcom/gcc-ipq8074.c
++++ b/drivers/clk/qcom/gcc-ipq8074.c
+@@ -76,7 +76,6 @@ static struct clk_fixed_factor gpll0_out_main_div2 = {
+ &gpll0_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -122,7 +121,6 @@ static struct clk_alpha_pll_postdiv gpll2 = {
+ &gpll2_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -155,7 +153,6 @@ static struct clk_alpha_pll_postdiv gpll4 = {
+ &gpll4_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -189,7 +186,6 @@ static struct clk_alpha_pll_postdiv gpll6 = {
+ &gpll6_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -202,7 +198,6 @@ static struct clk_fixed_factor gpll6_out_main_div2 = {
+ &gpll6_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+@@ -267,7 +262,6 @@ static struct clk_alpha_pll_postdiv nss_crypto_pll = {
+ &nss_crypto_pll_main.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+- .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+
+diff --git a/drivers/clk/socfpga/stratix10-clk.h b/drivers/clk/socfpga/stratix10-clk.h
+index 75234e0783e1c..83fe4eb3133cb 100644
+--- a/drivers/clk/socfpga/stratix10-clk.h
++++ b/drivers/clk/socfpga/stratix10-clk.h
+@@ -7,8 +7,10 @@
+ #define __STRATIX10_CLK_H
+
+ struct stratix10_clock_data {
+- struct clk_hw_onecell_data clk_data;
+ void __iomem *base;
++
++ /* Must be last */
++ struct clk_hw_onecell_data clk_data;
+ };
+
+ struct stratix10_pll_clock {
+diff --git a/drivers/clk/visconti/pll.h b/drivers/clk/visconti/pll.h
+index 01d07f1bf01b1..c4bd40676da4b 100644
+--- a/drivers/clk/visconti/pll.h
++++ b/drivers/clk/visconti/pll.h
+@@ -15,8 +15,10 @@
+
+ struct visconti_pll_provider {
+ void __iomem *reg_base;
+- struct clk_hw_onecell_data clk_data;
+ struct device_node *node;
++
++ /* Must be last */
++ struct clk_hw_onecell_data clk_data;
+ };
+
+ #define VISCONTI_PLL_RATE(_rate, _dacen, _dsmen, \
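Both clk hunks above move clk_hw_onecell_data to the end of its containing
struct: it terminates in a flexible array of clk_hw pointers, so any field
placed after it would be clobbered once the array is allocated and populated.
A hedged sketch of the shape involved (the hws[] layout matches the kernel's
struct clk_hw_onecell_data; the allocation line assumes an nr_clks count):

	/* clk_hw_onecell_data ends in a flexible array member, so the
	 * embedding struct must keep it as its final field and size the
	 * allocation with struct_size().
	 */
	struct clk_hw_onecell_data {
		unsigned int num;
		struct clk_hw *hws[];	/* flexible array: must be last */
	};

	provider = kzalloc(struct_size(provider, clk_data.hws, nr_clks),
			   GFP_KERNEL);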
+diff --git a/drivers/clocksource/timer-atmel-tcb.c b/drivers/clocksource/timer-atmel-tcb.c
+index 27af17c995900..2a90c92a9182a 100644
+--- a/drivers/clocksource/timer-atmel-tcb.c
++++ b/drivers/clocksource/timer-atmel-tcb.c
+@@ -315,6 +315,7 @@ static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx)
+ writel(mck_divisor_idx /* likely divide-by-8 */
+ | ATMEL_TC_WAVE
+ | ATMEL_TC_WAVESEL_UP /* free-run */
++ | ATMEL_TC_ASWTRG_SET /* TIOA0 rises at software trigger */
+ | ATMEL_TC_ACPA_SET /* TIOA0 rises at 0 */
+ | ATMEL_TC_ACPC_CLEAR, /* (duty cycle 50%) */
+ tcaddr + ATMEL_TC_REG(0, CMR));
+diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
+index 28ab4f1a7c713..6a878d227a13b 100644
+--- a/drivers/clocksource/timer-imx-gpt.c
++++ b/drivers/clocksource/timer-imx-gpt.c
+@@ -434,12 +434,16 @@ static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type t
+ return -ENOMEM;
+
+ imxtm->base = of_iomap(np, 0);
+- if (!imxtm->base)
+- return -ENXIO;
++ if (!imxtm->base) {
++ ret = -ENXIO;
++ goto err_kfree;
++ }
+
+ imxtm->irq = irq_of_parse_and_map(np, 0);
+- if (imxtm->irq <= 0)
+- return -EINVAL;
++ if (imxtm->irq <= 0) {
++ ret = -EINVAL;
++ goto err_kfree;
++ }
+
+ imxtm->clk_ipg = of_clk_get_by_name(np, "ipg");
+
+@@ -452,11 +456,15 @@ static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type t
+
+ ret = _mxc_timer_init(imxtm);
+ if (ret)
+- return ret;
++ goto err_kfree;
+
+ initialized = 1;
+
+ return 0;
++
++err_kfree:
++ kfree(imxtm);
++ return ret;
+ }
+
+ static int __init imx1_timer_init_dt(struct device_node *np)
+diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
+index 55c7ffd37d1cc..4a7cce339fae8 100644
+--- a/drivers/cpufreq/cpufreq_stats.c
++++ b/drivers/cpufreq/cpufreq_stats.c
+@@ -131,23 +131,23 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
+ len += sysfs_emit_at(buf, len, " From : To\n");
+ len += sysfs_emit_at(buf, len, " : ");
+ for (i = 0; i < stats->state_num; i++) {
+- if (len >= PAGE_SIZE)
++ if (len >= PAGE_SIZE - 1)
+ break;
+ len += sysfs_emit_at(buf, len, "%9u ", stats->freq_table[i]);
+ }
+- if (len >= PAGE_SIZE)
+- return PAGE_SIZE;
++ if (len >= PAGE_SIZE - 1)
++ return PAGE_SIZE - 1;
+
+ len += sysfs_emit_at(buf, len, "\n");
+
+ for (i = 0; i < stats->state_num; i++) {
+- if (len >= PAGE_SIZE)
++ if (len >= PAGE_SIZE - 1)
+ break;
+
+ len += sysfs_emit_at(buf, len, "%9u: ", stats->freq_table[i]);
+
+ for (j = 0; j < stats->state_num; j++) {
+- if (len >= PAGE_SIZE)
++ if (len >= PAGE_SIZE - 1)
+ break;
+
+ if (pending)
+@@ -157,12 +157,12 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
+
+ len += sysfs_emit_at(buf, len, "%9u ", count);
+ }
+- if (len >= PAGE_SIZE)
++ if (len >= PAGE_SIZE - 1)
+ break;
+ len += sysfs_emit_at(buf, len, "\n");
+ }
+
+- if (len >= PAGE_SIZE) {
++ if (len >= PAGE_SIZE - 1) {
+ pr_warn_once("cpufreq transition table exceeds PAGE_SIZE. Disabling\n");
+ return -EFBIG;
+ }
+diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
+index ba4852744c052..2aec118ba6775 100644
+--- a/drivers/crypto/hisilicon/qm.c
++++ b/drivers/crypto/hisilicon/qm.c
+@@ -845,6 +845,8 @@ static void qm_poll_req_cb(struct hisi_qp *qp)
+ qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
+ qp->qp_status.cq_head, 0);
+ atomic_dec(&qp->qp_status.used);
++
++ cond_resched();
+ }
+
+ /* set c_flag */
+diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
+index 2c6001592fe20..6a75a3cb601ec 100644
+--- a/drivers/cxl/core/port.c
++++ b/drivers/cxl/core/port.c
+@@ -1242,35 +1242,39 @@ static struct device *grandparent(struct device *dev)
+ return NULL;
+ }
+
++static struct device *endpoint_host(struct cxl_port *endpoint)
++{
++ struct cxl_port *port = to_cxl_port(endpoint->dev.parent);
++
++ if (is_cxl_root(port))
++ return port->uport_dev;
++ return &port->dev;
++}
++
+ static void delete_endpoint(void *data)
+ {
+ struct cxl_memdev *cxlmd = data;
+ struct cxl_port *endpoint = cxlmd->endpoint;
+- struct cxl_port *parent_port;
+- struct device *parent;
+-
+- parent_port = cxl_mem_find_port(cxlmd, NULL);
+- if (!parent_port)
+- goto out;
+- parent = &parent_port->dev;
++ struct device *host = endpoint_host(endpoint);
+
+- device_lock(parent);
+- if (parent->driver && !endpoint->dead) {
+- devm_release_action(parent, cxl_unlink_parent_dport, endpoint);
+- devm_release_action(parent, cxl_unlink_uport, endpoint);
+- devm_release_action(parent, unregister_port, endpoint);
++ device_lock(host);
++ if (host->driver && !endpoint->dead) {
++ devm_release_action(host, cxl_unlink_parent_dport, endpoint);
++ devm_release_action(host, cxl_unlink_uport, endpoint);
++ devm_release_action(host, unregister_port, endpoint);
+ }
+ cxlmd->endpoint = NULL;
+- device_unlock(parent);
+- put_device(parent);
+-out:
++ device_unlock(host);
+ put_device(&endpoint->dev);
++ put_device(host);
+ }
+
+ int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
+ {
++ struct device *host = endpoint_host(endpoint);
+ struct device *dev = &cxlmd->dev;
+
++ get_device(host);
+ get_device(&endpoint->dev);
+ cxlmd->endpoint = endpoint;
+ cxlmd->depth = endpoint->depth;
+diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
+index a25f5deb3de51..c7e70ccdb9ef0 100644
+--- a/drivers/cxl/core/region.c
++++ b/drivers/cxl/core/region.c
+@@ -1127,7 +1127,14 @@ static int cxl_port_setup_targets(struct cxl_port *port,
+ }
+
+ if (is_cxl_root(parent_port)) {
+- parent_ig = cxlrd->cxlsd.cxld.interleave_granularity;
++ /*
++ * Root decoder IG is always set to the value in the CFMWS, which
++ * may differ from this region's IG. We can use the region's IG
++ * here since interleave_granularity_store() does not allow
++ * interleaved host bridges with root IG != region IG.
++ */
++ parent_ig = p->interleave_granularity;
+ parent_iw = cxlrd->cxlsd.cxld.interleave_ways;
+ /*
+ * For purposes of address bit routing, use power-of-2 math for
+@@ -1676,6 +1683,12 @@ static int cxl_region_attach(struct cxl_region *cxlr,
+ return -ENXIO;
+ }
+
++ if (p->nr_targets >= p->interleave_ways) {
++ dev_dbg(&cxlr->dev, "region already has %d endpoints\n",
++ p->nr_targets);
++ return -EINVAL;
++ }
++
+ ep_port = cxled_to_port(cxled);
+ root_port = cxlrd_to_port(cxlrd);
+ dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge);
+@@ -1768,7 +1781,7 @@ static int cxl_region_attach(struct cxl_region *cxlr,
+ if (p->nr_targets == p->interleave_ways) {
+ rc = cxl_region_setup_targets(cxlr);
+ if (rc)
+- goto err_decrement;
++ return rc;
+ p->state = CXL_CONFIG_ACTIVE;
+ }
+
+@@ -1800,12 +1813,6 @@ static int cxl_region_attach(struct cxl_region *cxlr,
+ }
+
+ return 0;
+-
+-err_decrement:
+- p->nr_targets--;
+- cxled->pos = -1;
+- p->targets[pos] = NULL;
+- return rc;
+ }
+
+ static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
+diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
+index 2e37c47044af5..5e17e2d3dccad 100644
+--- a/drivers/dma/stm32-mdma.c
++++ b/drivers/dma/stm32-mdma.c
+@@ -490,7 +490,7 @@ static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan,
+ src_maxburst = chan->dma_config.src_maxburst;
+ dst_maxburst = chan->dma_config.dst_maxburst;
+
+- ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
++ ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN;
+ ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
+ ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
+
+@@ -966,7 +966,7 @@ stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
+ if (!desc)
+ return NULL;
+
+- ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
++ ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN;
+ ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
+ ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
+ cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
+diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
+index fde33acd46b75..18060a2dcf906 100644
+--- a/drivers/firmware/qcom_scm.c
++++ b/drivers/firmware/qcom_scm.c
+@@ -171,6 +171,12 @@ static enum qcom_scm_convention __get_convention(void)
+ if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
+ return qcom_scm_convention;
+
++ /*
++ * Per the "SMC calling convention specification", the 64-bit calling
++ * convention can only be used when the client is 64-bit; otherwise
++ * the system will encounter undefined behaviour.
++ */
++#if IS_ENABLED(CONFIG_ARM64)
+ /*
+ * Device isn't required as there is only one argument - no device
+ * needed to dma_map_single to secure world
+@@ -191,6 +197,7 @@ static enum qcom_scm_convention __get_convention(void)
+ forced = true;
+ goto found;
+ }
++#endif
+
+ probed_convention = SMC_CONVENTION_ARM_32;
+ ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
+diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
+index a775d2bdac94f..980ec04892173 100644
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -1655,6 +1655,26 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
+ .ignore_wake = "SYNA1202:00@16",
+ },
+ },
++ {
++ /*
++ * On the Peaq C1010 2-in-1, INT33FC:00 pin 3 is connected to
++ * a "dolby" button. At the ACPI level an _AEI event handler
++ * is attached, which sets an ACPI variable to 1 on both
++ * edges. The variable can be polled and cleared to 0 using
++ * WMI, but since it is set on both edges the WMI interface
++ * is pretty useless even when polling. So instead the
++ * x86-android-tablets code instantiates a gpio-keys platform
++ * device for it.
++ * Ignore the _AEI handler for the pin, so that it is not busy.
++ */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "PEAQ"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "PEAQ PMM C1010 MD99187"),
++ },
++ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
++ .ignore_interrupt = "INT33FC:00@3",
++ },
++ },
+ {} /* Terminating entry */
+ };
+
+diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
+index 1436cdb5fa268..219bf8a82d8f9 100644
+--- a/drivers/gpio/gpiolib-of.c
++++ b/drivers/gpio/gpiolib-of.c
+@@ -496,6 +496,10 @@ static struct gpio_desc *of_find_gpio_rename(struct device_node *np,
+ #if IS_ENABLED(CONFIG_SND_SOC_CS42L56)
+ { "reset", "cirrus,gpio-nreset", "cirrus,cs42l56" },
+ #endif
++#if IS_ENABLED(CONFIG_SND_SOC_MT2701_CS42448)
++ { "i2s1-in-sel-gpio1", NULL, "mediatek,mt2701-cs42448-machine" },
++ { "i2s1-in-sel-gpio2", NULL, "mediatek,mt2701-cs42448-machine" },
++#endif
+ #if IS_ENABLED(CONFIG_SND_SOC_TLV320AIC3X)
+ { "reset", "gpio-reset", "ti,tlv320aic3x" },
+ { "reset", "gpio-reset", "ti,tlv320aic33" },
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+index b582b83c4984f..3c52f49069330 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+@@ -29,6 +29,7 @@
+ #include "amdgpu.h"
+ #include "atom.h"
+
++#include <linux/device.h>
+ #include <linux/pci.h>
+ #include <linux/slab.h>
+ #include <linux/acpi.h>
+@@ -287,6 +288,10 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
+ if (adev->flags & AMD_IS_APU)
+ return false;
+
++ /* ATRM is for on-platform devices only */
++ if (dev_is_removable(&adev->pdev->dev))
++ return false;
++
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+ dhandle = ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+index 252a876b07258..fdc302aa59e7b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+@@ -179,6 +179,7 @@ int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
+ }
+
+ rcu_read_unlock();
++ *result = NULL;
+ return -ENOENT;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 946d031d2520e..73f7ced3bf072 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -1438,7 +1438,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ if (r == -ENOMEM)
+ DRM_ERROR("Not enough memory for command submission!\n");
+ else if (r != -ERESTARTSYS && r != -EAGAIN)
+- DRM_ERROR("Failed to process the buffer list %d!\n", r);
++ DRM_DEBUG("Failed to process the buffer list %d!\n", r);
+ goto error_fini;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+index 56e89e76ff179..33cada366eeb1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+@@ -747,6 +747,9 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
+ ssize_t result = 0;
+ int r;
+
++ if (!adev->smc_rreg)
++ return -EPERM;
++
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+@@ -803,6 +806,9 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
+ ssize_t result = 0;
+ int r;
+
++ if (!adev->smc_wreg)
++ return -EPERM;
++
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 8940ee73f2dfe..77fc71e74c124 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -43,6 +43,7 @@
+ #include <drm/drm_fb_helper.h>
+ #include <drm/drm_probe_helper.h>
+ #include <drm/amdgpu_drm.h>
++#include <linux/device.h>
+ #include <linux/vgaarb.h>
+ #include <linux/vga_switcheroo.h>
+ #include <linux/efi.h>
+@@ -2233,7 +2234,6 @@ out:
+ */
+ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
+ {
+- struct drm_device *dev = adev_to_drm(adev);
+ struct pci_dev *parent;
+ int i, r;
+ bool total;
+@@ -2304,7 +2304,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
+ (amdgpu_is_atpx_hybrid() ||
+ amdgpu_has_atpx_dgpu_power_cntl()) &&
+ ((adev->flags & AMD_IS_APU) == 0) &&
+- !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
++ !dev_is_removable(&adev->pdev->dev))
+ adev->flags |= AMD_IS_PX;
+
+ if (!(adev->flags & AMD_IS_APU)) {
+@@ -2318,6 +2318,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
+ adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
++ if (!amdgpu_device_pcie_dynamic_switching_supported())
++ adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
+
+ total = true;
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+@@ -4130,7 +4132,7 @@ fence_driver_init:
+
+ px = amdgpu_device_supports_px(ddev);
+
+- if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
++ if (px || (!dev_is_removable(&adev->pdev->dev) &&
+ apple_gmux_detect(NULL, NULL)))
+ vga_switcheroo_register_client(adev->pdev,
+ &amdgpu_switcheroo_ops, px);
+@@ -4276,7 +4278,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
+
+ px = amdgpu_device_supports_px(adev_to_drm(adev));
+
+- if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
++ if (px || (!dev_is_removable(&adev->pdev->dev) &&
+ apple_gmux_detect(NULL, NULL)))
+ vga_switcheroo_unregister_client(adev->pdev);
+
+@@ -5399,7 +5401,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ * Flush RAM to disk so that after reboot
+ * the user can read log and see why the system rebooted.
+ */
+- if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
++ if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
++ amdgpu_ras_get_context(adev)->reboot) {
+ DRM_WARN("Emergency reboot.");
+
+ ksys_sync_helper();
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+index 8e1cfc87122d6..dcf685b501e01 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+@@ -92,6 +92,7 @@
+ MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
+
+ #define mmRCC_CONFIG_MEMSIZE 0xde3
++#define mmMP0_SMN_C2PMSG_33 0x16061
+ #define mmMM_INDEX 0x0
+ #define mmMM_INDEX_HI 0x6
+ #define mmMM_DATA 0x1
+@@ -230,8 +231,26 @@ static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev,
+ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
+ uint8_t *binary)
+ {
+- uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
+- int ret = 0;
++ uint64_t vram_size;
++ u32 msg;
++ int i, ret = 0;
++
++ /* It can take up to a second for IFWI init to complete on some dGPUs,
++ * but generally it should be in the 60-100ms range. Normally this starts
++ * as soon as the device gets power, so by the time the OS loads it has
++ * long since completed. However, when a card is hotplugged via, e.g.,
++ * USB4, we need to wait for init to complete. Once the C2PMSG is
++ * updated, we can continue.
++ */
++ if (dev_is_removable(&adev->pdev->dev)) {
++ for (i = 0; i < 1000; i++) {
++ msg = RREG32(mmMP0_SMN_C2PMSG_33);
++ if (msg & 0x80000000)
++ break;
++ msleep(1);
++ }
++ }
++ vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
+
+ if (vram_size) {
+ uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+index f808841310fdf..1035f7982f3b5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+@@ -627,8 +627,20 @@ static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
+ mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
+ mqd_prop.hqd_active = false;
+
++ if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
++ p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
++ mutex_lock(&adev->srbm_mutex);
++ amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
++ }
++
+ mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);
+
++ if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
++ p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
++ amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
++ mutex_unlock(&adev->srbm_mutex);
++ }
++
+ amdgpu_bo_unreserve(q->mqd_obj);
+ }
+
+@@ -1062,9 +1074,13 @@ int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
+ switch (queue_type) {
+ case AMDGPU_RING_TYPE_GFX:
+ ring->funcs = adev->gfx.gfx_ring[0].funcs;
++ ring->me = adev->gfx.gfx_ring[0].me;
++ ring->pipe = adev->gfx.gfx_ring[0].pipe;
+ break;
+ case AMDGPU_RING_TYPE_COMPUTE:
+ ring->funcs = adev->gfx.compute_ring[0].funcs;
++ ring->me = adev->gfx.compute_ring[0].me;
++ ring->pipe = adev->gfx.compute_ring[0].pipe;
+ break;
+ case AMDGPU_RING_TYPE_SDMA:
+ ring->funcs = adev->sdma.instance[0].ring.funcs;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+index 7d5019a884024..2003be3390aab 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+@@ -1380,7 +1380,8 @@ static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
+ {
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+- sysfs_remove_file_from_group(&adev->dev->kobj,
++ if (adev->dev->kobj.sd)
++ sysfs_remove_file_from_group(&adev->dev->kobj,
+ &con->badpages_attr.attr,
+ RAS_FS_NAME);
+ }
+@@ -1397,7 +1398,8 @@ static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
+ .attrs = attrs,
+ };
+
+- sysfs_remove_group(&adev->dev->kobj, &group);
++ if (adev->dev->kobj.sd)
++ sysfs_remove_group(&adev->dev->kobj, &group);
+
+ return 0;
+ }
+@@ -1444,7 +1446,8 @@ int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
+ if (!obj || !obj->attr_inuse)
+ return -EINVAL;
+
+- sysfs_remove_file_from_group(&adev->dev->kobj,
++ if (adev->dev->kobj.sd)
++ sysfs_remove_file_from_group(&adev->dev->kobj,
+ &obj->sysfs_attr.attr,
+ RAS_FS_NAME);
+ obj->attr_inuse = 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index ae455aab5d29d..7e54abca45206 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -292,8 +292,15 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
+ void *ptr;
+ int i, idx;
+
++ bool in_ras_intr = amdgpu_ras_intr_triggered();
++
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
++ /* err_event_athub will corrupt the VCPU buffer, so we need to
++ * restore fw data and clear the buffer in amdgpu_vcn_resume() */
++ if (in_ras_intr)
++ return 0;
++
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+index d0748bcfad16b..75d25fba80821 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+@@ -239,6 +239,8 @@ static int amdgpu_vkms_conn_get_modes(struct drm_connector *connector)
+
+ for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
+ mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
++ if (!mode)
++ continue;
+ drm_mode_probed_add(connector, mode);
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 7a67bb1490159..0d2e50385c3e4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1099,8 +1099,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
+ bo = gem_to_amdgpu_bo(gobj);
+ }
+ mem = bo->tbo.resource;
+- if (mem->mem_type == TTM_PL_TT ||
+- mem->mem_type == AMDGPU_PL_PREEMPT)
++ if (mem && (mem->mem_type == TTM_PL_TT ||
++ mem->mem_type == AMDGPU_PL_PREEMPT))
+ pages_addr = bo->tbo.ttm->dma_address;
+ }
+
+@@ -2129,7 +2129,8 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
+ * Returns:
+ * 0 for success, error for failure.
+ */
+-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id)
++int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
++ int32_t xcp_id)
+ {
+ struct amdgpu_bo *root_bo;
+ struct amdgpu_bo_vm *root;
+@@ -2148,6 +2149,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp
+ INIT_LIST_HEAD(&vm->done);
+ INIT_LIST_HEAD(&vm->pt_freed);
+ INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
++ INIT_KFIFO(vm->faults);
+
+ r = amdgpu_vm_init_entities(adev, vm);
+ if (r)
+@@ -2182,34 +2184,33 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp
+ false, &root, xcp_id);
+ if (r)
+ goto error_free_delayed;
+- root_bo = &root->bo;
++
++ root_bo = amdgpu_bo_ref(&root->bo);
+ r = amdgpu_bo_reserve(root_bo, true);
+- if (r)
+- goto error_free_root;
++ if (r) {
++ amdgpu_bo_unref(&root->shadow);
++ amdgpu_bo_unref(&root_bo);
++ goto error_free_delayed;
++ }
+
++ amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
+ r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
+ if (r)
+- goto error_unreserve;
+-
+- amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
++ goto error_free_root;
+
+ r = amdgpu_vm_pt_clear(adev, vm, root, false);
+ if (r)
+- goto error_unreserve;
++ goto error_free_root;
+
+ amdgpu_bo_unreserve(vm->root.bo);
+-
+- INIT_KFIFO(vm->faults);
++ amdgpu_bo_unref(&root_bo);
+
+ return 0;
+
+-error_unreserve:
+- amdgpu_bo_unreserve(vm->root.bo);
+-
+ error_free_root:
+- amdgpu_bo_unref(&root->shadow);
++ amdgpu_vm_pt_free_root(adev, vm);
++ amdgpu_bo_unreserve(vm->root.bo);
+ amdgpu_bo_unref(&root_bo);
+- vm->root.bo = NULL;
+
+ error_free_delayed:
+ dma_fence_put(vm->last_tlb_flush);
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+index 4038455d79984..ef368ca79a668 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+@@ -28,6 +28,7 @@
+ #include "nbio/nbio_2_3_offset.h"
+ #include "nbio/nbio_2_3_sh_mask.h"
+ #include <uapi/linux/kfd_ioctl.h>
++#include <linux/device.h>
+ #include <linux/pci.h>
+
+ #define smnPCIE_CONFIG_CNTL 0x11180044
+@@ -361,7 +362,7 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
+
+ data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
+
+- if (pci_is_thunderbolt_attached(adev->pdev))
++ if (dev_is_removable(&adev->pdev->dev))
+ data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+ else
+ data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+@@ -480,7 +481,7 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
+ data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
+- if (pci_is_thunderbolt_attached(adev->pdev))
++ if (dev_is_removable(&adev->pdev->dev))
+ data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+ else
+ data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
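Both ASPM hunks swap pci_is_thunderbolt_attached() for dev_is_removable() from <linux/device.h>, which also covers USB4 and other removable enclosures rather than only Apple-style Thunderbolt. A hedged sketch of the call-site shape; the two timeout arguments stand in for the NAVI10_PCIE__* constants used above:

	#include <linux/device.h>
	#include <linux/pci.h>
	#include <linux/types.h>

	/* Sketch: pick the longer L1 inactivity timeout when the GPU sits
	 * behind a removable link (Thunderbolt/USB4). */
	static u32 pick_l1_inactivity(struct pci_dev *pdev, u32 tbt_val, u32 def_val)
	{
		return dev_is_removable(&pdev->dev) ? tbt_val : def_val;
	}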
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
+index c7991e07b6be5..a7697ec8188e0 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
+@@ -268,7 +268,7 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
+ SQ_INTERRUPT_WORD_WAVE_CTXID1, ENCODING);
+ switch (encoding) {
+ case SQ_INTERRUPT_WORD_ENCODING_AUTO:
+- pr_debug(
++ pr_debug_ratelimited(
+ "sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf0_full %d, ttrac_buf1_full %d, ttrace_utc_err %d\n",
+ REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_AUTO_CTXID1,
+ SE_ID),
+@@ -284,7 +284,7 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
+ THREAD_TRACE_UTC_ERROR));
+ break;
+ case SQ_INTERRUPT_WORD_ENCODING_INST:
+- pr_debug("sq_intr: inst, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
++ pr_debug_ratelimited("sq_intr: inst, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
+ REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ SE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+@@ -310,7 +310,7 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
+ case SQ_INTERRUPT_WORD_ENCODING_ERROR:
+ sq_intr_err_type = REG_GET_FIELD(context_id0, KFD_CTXID0,
+ ERR_TYPE);
+- pr_warn("sq_intr: error, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d, err_type %d\n",
++ pr_warn_ratelimited("sq_intr: error, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d, err_type %d\n",
+ REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ SE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
+index f933bd231fb9c..2a65792fd1162 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
+@@ -150,7 +150,7 @@ enum SQ_INTERRUPT_ERROR_TYPE {
+
+ static void print_sq_intr_info_auto(uint32_t context_id0, uint32_t context_id1)
+ {
+- pr_debug(
++ pr_debug_ratelimited(
+ "sq_intr: auto, ttrace %d, wlt %d, ttrace_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n",
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, THREAD_TRACE),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, WLT),
+@@ -165,7 +165,7 @@ static void print_sq_intr_info_auto(uint32_t context_id0, uint32_t context_id1)
+
+ static void print_sq_intr_info_inst(uint32_t context_id0, uint32_t context_id1)
+ {
+- pr_debug(
++ pr_debug_ratelimited(
+ "sq_intr: inst, data 0x%08x, sh %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, DATA),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, SH_ID),
+@@ -177,7 +177,7 @@ static void print_sq_intr_info_inst(uint32_t context_id0, uint32_t context_id1)
+
+ static void print_sq_intr_info_error(uint32_t context_id0, uint32_t context_id1)
+ {
+- pr_warn(
++ pr_warn_ratelimited(
+ "sq_intr: error, detail 0x%08x, type %d, sh %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, DETAIL),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, TYPE),
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+index f0731a6a5306c..02695ccd22d6e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+@@ -333,7 +333,7 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
+ encoding = REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, ENCODING);
+ switch (encoding) {
+ case SQ_INTERRUPT_WORD_ENCODING_AUTO:
+- pr_debug(
++ pr_debug_ratelimited(
+ "sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n",
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, SE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE),
+@@ -347,7 +347,7 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE_UTC_ERROR));
+ break;
+ case SQ_INTERRUPT_WORD_ENCODING_INST:
+- pr_debug("sq_intr: inst, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, intr_data 0x%x\n",
++ pr_debug_ratelimited("sq_intr: inst, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, intr_data 0x%x\n",
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
+@@ -366,7 +366,7 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
+ break;
+ case SQ_INTERRUPT_WORD_ENCODING_ERROR:
+ sq_intr_err = REG_GET_FIELD(sq_int_data, KFD_SQ_INT_DATA, ERR_TYPE);
+- pr_warn("sq_intr: error, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, err_type %d\n",
++ pr_warn_ratelimited("sq_intr: error, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, err_type %d\n",
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
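The v9/v10/v11 interrupt handlers all move their SQ diagnostics to pr_debug_ratelimited()/pr_warn_ratelimited() so a storm of identical interrupts cannot flood dmesg. As a generic illustration of the idiom:

	#include <linux/printk.h>

	/* Sketch: rate-limit diagnostics emitted from an interrupt path. */
	static void report_sq_error(unsigned int se_id, unsigned int err_type)
	{
		pr_warn_ratelimited("sq_intr: error, se %u, err_type %u\n",
				    se_id, err_type);
	}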
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+index 50f943e04f8a4..a5c394fcbb350 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+@@ -617,8 +617,15 @@ create_bo_failed:
+
+ void svm_range_vram_node_free(struct svm_range *prange)
+ {
+- svm_range_bo_unref(prange->svm_bo);
+- prange->ttm_res = NULL;
++ /* serialize prange->svm_bo unref */
++ mutex_lock(&prange->lock);
++	/* prange->svm_bo has not been unreffed yet */
++ if (prange->ttm_res) {
++ prange->ttm_res = NULL;
++ mutex_unlock(&prange->lock);
++ svm_range_bo_unref(prange->svm_bo);
++ } else
++ mutex_unlock(&prange->lock);
+ }
+
+ struct kfd_node *
+@@ -749,7 +756,7 @@ svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
+ prange->flags &= ~attrs[i].value;
+ break;
+ case KFD_IOCTL_SVM_ATTR_GRANULARITY:
+- prange->granularity = attrs[i].value;
++ prange->granularity = min_t(uint32_t, attrs[i].value, 0x3F);
+ break;
+ default:
+ WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
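The svm_range_vram_node_free() rework is a test-and-clear under a lock: whichever caller observes ttm_res non-NULL wins ownership of the reference, and the unref itself is deferred until after the mutex is dropped so a potentially sleeping release never runs with the lock held. A stripped-down sketch with illustrative names:

	#include <linux/mutex.h>
	#include <linux/types.h>

	struct obj;			/* placeholder refcounted object */
	void obj_unref(struct obj *o);	/* assumed release helper, may sleep */

	struct holder {
		struct mutex lock;
		struct obj *res;	/* non-NULL while a ref is still owned */
		struct obj *bo;
	};

	static void holder_free_res(struct holder *h)
	{
		bool drop;

		mutex_lock(&h->lock);		/* serialize the test-and-clear */
		drop = h->res != NULL;
		h->res = NULL;
		mutex_unlock(&h->lock);

		if (drop)			/* release outside the lock */
			obj_unref(h->bo);
	}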
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 573e27399c790..256058cd42851 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2077,7 +2077,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
+ struct dmub_srv_create_params create_params;
+ struct dmub_srv_region_params region_params;
+ struct dmub_srv_region_info region_info;
+- struct dmub_srv_fb_params fb_params;
++ struct dmub_srv_memory_params memory_params;
+ struct dmub_srv_fb_info *fb_info;
+ struct dmub_srv *dmub_srv;
+ const struct dmcub_firmware_header_v1_0 *hdr;
+@@ -2177,6 +2177,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
+ adev->dm.dmub_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+ PSP_HEADER_BYTES;
++ region_params.is_mailbox_in_inbox = false;
+
+ status = dmub_srv_calc_region_info(dmub_srv, &region_params,
+ &region_info);
+@@ -2200,10 +2201,10 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
+ return r;
+
+ /* Rebase the regions on the framebuffer address. */
+- memset(&fb_params, 0, sizeof(fb_params));
+- fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
+- fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
+- fb_params.region_info = &region_info;
++ memset(&memory_params, 0, sizeof(memory_params));
++ memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
++ memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
++ memory_params.region_info = &region_info;
+
+ adev->dm.dmub_fb_info =
+ kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
+@@ -2215,7 +2216,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
+ return -ENOMEM;
+ }
+
+- status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
++ status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
+ return -EINVAL;
+@@ -7394,6 +7395,9 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
+ int i;
+ int result = -EIO;
+
++ if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported)
++ return result;
++
+ cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
+
+ if (!cmd.payloads)
+@@ -9504,14 +9508,14 @@ static bool should_reset_plane(struct drm_atomic_state *state,
+ struct drm_plane *other;
+ struct drm_plane_state *old_other_state, *new_other_state;
+ struct drm_crtc_state *new_crtc_state;
++ struct amdgpu_device *adev = drm_to_adev(plane->dev);
+ int i;
+
+ /*
+- * TODO: Remove this hack once the checks below are sufficient
+- * enough to determine when we need to reset all the planes on
+- * the stream.
++	 * TODO: Remove this hack for all ASICs once it is proven that
++	 * fast updates work fine on DCN3.2+.
+ */
+- if (state->allow_modeset)
++ if (adev->ip_versions[DCE_HWIP][0] < IP_VERSION(3, 2, 0) && state->allow_modeset)
+ return true;
+
+ /* Exit early if we know that we're adding or removing the plane. */
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index b885c39bd16ba..ad1a1368f5779 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -1591,31 +1591,31 @@ enum dc_status dm_dp_mst_is_port_support_mode(
+ unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0;
+ unsigned int max_compressed_bw_in_kbps = 0;
+ struct dc_dsc_bw_range bw_range = {0};
+- struct drm_dp_mst_topology_mgr *mst_mgr;
++ uint16_t full_pbn = aconnector->mst_output_port->full_pbn;
+
+ /*
+- * check if the mode could be supported if DSC pass-through is supported
+- * AND check if there enough bandwidth available to support the mode
+- * with DSC enabled.
++	 * Consider the case where the depth of the MST topology tree is equal to or less than 2.
++	 * A. When the DSC bitstream can be transmitted along the entire path:
++	 *    1. DSC is possible between source and branch/leaf device (common DSC params are possible), AND
++	 *    2. DSC passthrough is supported at the MST branch, or
++	 *    3. DSC decoding is supported at the leaf MST device.
++	 *    Use maximum DSC compression as the bw constraint.
++	 * B. When the DSC bitstream cannot be transmitted along the entire path:
++	 *    Use native bw as the bw constraint.
+ */
+ if (is_dsc_common_config_possible(stream, &bw_range) &&
+- aconnector->mst_output_port->passthrough_aux) {
+- mst_mgr = aconnector->mst_output_port->mgr;
+- mutex_lock(&mst_mgr->lock);
+-
++ (aconnector->mst_output_port->passthrough_aux ||
++ aconnector->dsc_aux == &aconnector->mst_output_port->aux)) {
+ cur_link_settings = stream->link->verified_link_cap;
+
+ upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
+- &cur_link_settings
+- );
+- down_link_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
++ &cur_link_settings);
++ down_link_bw_in_kbps = kbps_from_pbn(full_pbn);
+
+ /* pick the bottleneck */
+ end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps,
+ down_link_bw_in_kbps);
+
+- mutex_unlock(&mst_mgr->lock);
+-
+ /*
+ * use the maximum dsc compression bandwidth as the required
+ * bandwidth for the mode
+@@ -1630,8 +1630,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
+ /* check if mode could be supported within full_pbn */
+ bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
+ pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false);
+-
+- if (pbn > aconnector->mst_output_port->full_pbn)
++ if (pbn > full_pbn)
+ return DC_FAIL_BANDWIDTH_VALIDATE;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 609048160aa20..b386f3b0fd428 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -993,7 +993,8 @@ static bool dc_construct(struct dc *dc,
+ /* set i2c speed if not done by the respective dcnxxx__resource.c */
+ if (dc->caps.i2c_speed_in_khz_hdcp == 0)
+ dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
+-
++ if (dc->caps.max_optimizable_video_width == 0)
++ dc->caps.max_optimizable_video_width = 5120;
+ dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
+ if (!dc->clk_mgr)
+ goto fail;
+@@ -1070,53 +1071,6 @@ static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *contex
+ }
+ }
+
+-static void phantom_pipe_blank(
+- struct dc *dc,
+- struct timing_generator *tg,
+- int width,
+- int height)
+-{
+- struct dce_hwseq *hws = dc->hwseq;
+- enum dc_color_space color_space;
+- struct tg_color black_color = {0};
+- struct output_pixel_processor *opp = NULL;
+- uint32_t num_opps, opp_id_src0, opp_id_src1;
+- uint32_t otg_active_width, otg_active_height;
+- uint32_t i;
+-
+- /* program opp dpg blank color */
+- color_space = COLOR_SPACE_SRGB;
+- color_space_to_black_color(dc, color_space, &black_color);
+-
+- otg_active_width = width;
+- otg_active_height = height;
+-
+- /* get the OPTC source */
+- tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
+- ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp);
+-
+- for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
+- if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src0) {
+- opp = dc->res_pool->opps[i];
+- break;
+- }
+- }
+-
+- if (opp && opp->funcs->opp_set_disp_pattern_generator)
+- opp->funcs->opp_set_disp_pattern_generator(
+- opp,
+- CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
+- CONTROLLER_DP_COLOR_SPACE_UDEFINED,
+- COLOR_DEPTH_UNDEFINED,
+- &black_color,
+- otg_active_width,
+- otg_active_height,
+- 0);
+-
+- if (tg->funcs->is_tg_enabled(tg))
+- hws->funcs.wait_for_blank_complete(opp);
+-}
+-
+ static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
+ {
+ if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
+@@ -1207,7 +1161,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
+
+ main_pipe_width = old_stream->mall_stream_config.paired_stream->dst.width;
+ main_pipe_height = old_stream->mall_stream_config.paired_stream->dst.height;
+- phantom_pipe_blank(dc, tg, main_pipe_width, main_pipe_height);
++ if (dc->hwss.blank_phantom)
++ dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
+ tg->funcs->enable_crtc(tg);
+ }
+ }
+@@ -2476,6 +2431,7 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
+ }
+
+ static enum surface_update_type get_scaling_info_update_type(
++ const struct dc *dc,
+ const struct dc_surface_update *u)
+ {
+ union surface_update_flags *update_flags = &u->surface->update_flags;
+@@ -2510,6 +2466,12 @@ static enum surface_update_type get_scaling_info_update_type(
+ update_flags->bits.clock_change = 1;
+ }
+
++ if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width &&
++ (u->scaling_info->clip_rect.width > u->surface->clip_rect.width ||
++ u->scaling_info->clip_rect.height > u->surface->clip_rect.height))
++ /* Changing clip size of a large surface may result in MPC slice count change */
++ update_flags->bits.bandwidth_change = 1;
++
+ if (u->scaling_info->src_rect.x != u->surface->src_rect.x
+ || u->scaling_info->src_rect.y != u->surface->src_rect.y
+ || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
+@@ -2547,7 +2509,7 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
+ type = get_plane_info_update_type(u);
+ elevate_update_type(&overall_type, type);
+
+- type = get_scaling_info_update_type(u);
++ type = get_scaling_info_update_type(dc, u);
+ elevate_update_type(&overall_type, type);
+
+ if (u->flip_addr) {
+@@ -4460,6 +4422,14 @@ bool dc_update_planes_and_stream(struct dc *dc,
+ update_type,
+ context);
+ } else {
++ if (!stream_update &&
++ dc->hwss.is_pipe_topology_transition_seamless &&
++ !dc->hwss.is_pipe_topology_transition_seamless(
++ dc, dc->current_state, context)) {
++
++ DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n");
++ BREAK_TO_DEBUGGER();
++ }
+ commit_planes_for_stream(
+ dc,
+ srf_updates,
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+index 6e11d2b701f82..569d40eb7059d 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+@@ -556,7 +556,7 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
+
+- if (res_ctx->pipe_ctx[i].stream != stream)
++ if (res_ctx->pipe_ctx[i].stream != stream || !tg)
+ continue;
+
+ return tg->funcs->get_frame_count(tg);
+@@ -615,7 +615,7 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
+
+- if (res_ctx->pipe_ctx[i].stream != stream)
++ if (res_ctx->pipe_ctx[i].stream != stream || !tg)
+ continue;
+
+ tg->funcs->get_scanoutpos(tg,
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 81258392d44a1..dc0e0af616506 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -229,6 +229,11 @@ struct dc_caps {
+ uint32_t dmdata_alloc_size;
+ unsigned int max_cursor_size;
+ unsigned int max_video_width;
++ /*
++	 * max video plane width that can be safely assumed to always be
++	 * supported by a single DPP pipe.
++ */
++ unsigned int max_optimizable_video_width;
+ unsigned int min_horizontal_blanking_period;
+ int linear_pitch_alignment;
+ bool dcc_const_color;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 9834b75f1837b..79befa17bb037 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -111,7 +111,8 @@ void dcn10_lock_all_pipes(struct dc *dc,
+ if (pipe_ctx->top_pipe ||
+ !pipe_ctx->stream ||
+ (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||
+- !tg->funcs->is_tg_enabled(tg))
++ !tg->funcs->is_tg_enabled(tg) ||
++ pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ continue;
+
+ if (lock)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+index 62a077adcdbfa..84fe449a2c7ed 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+@@ -1846,8 +1846,16 @@ void dcn20_program_front_end_for_ctx(
+ dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;
+
+- if (tg->funcs->enable_crtc)
++ if (tg->funcs->enable_crtc) {
++ if (dc->hwss.blank_phantom) {
++ int main_pipe_width, main_pipe_height;
++
++ main_pipe_width = dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.paired_stream->dst.width;
++ main_pipe_height = dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.paired_stream->dst.height;
++ dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
++ }
+ tg->funcs->enable_crtc(tg);
++ }
+ }
+ }
+ /* OTG blank before disabling all front ends */
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+index d52d5feeb311b..ccbcfd6bd6b85 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+@@ -216,7 +216,7 @@ static bool dcn32_check_no_memory_request_for_cab(struct dc *dc)
+ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx)
+ {
+ int i;
+- uint8_t num_ways = 0;
++ uint32_t num_ways = 0;
+ uint32_t mall_ss_size_bytes = 0;
+
+ mall_ss_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_size_bytes;
+@@ -246,7 +246,8 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
+ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
+ {
+ union dmub_rb_cmd cmd;
+- uint8_t ways, i;
++ uint8_t i;
++ uint32_t ways;
+ int j;
+ bool mall_ss_unsupported = false;
+ struct dc_plane_state *plane = NULL;
+@@ -306,7 +307,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
+ cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS;
+ cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB;
+ cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header);
+- cmd.cab.cab_alloc_ways = ways;
++ cmd.cab.cab_alloc_ways = (uint8_t)ways;
+
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+
+@@ -1575,3 +1576,101 @@ void dcn32_init_blank(
+ if (opp)
+ hws->funcs.wait_for_blank_complete(opp);
+ }
++
++void dcn32_blank_phantom(struct dc *dc,
++ struct timing_generator *tg,
++ int width,
++ int height)
++{
++ struct dce_hwseq *hws = dc->hwseq;
++ enum dc_color_space color_space;
++ struct tg_color black_color = {0};
++ struct output_pixel_processor *opp = NULL;
++ uint32_t num_opps, opp_id_src0, opp_id_src1;
++ uint32_t otg_active_width, otg_active_height;
++ uint32_t i;
++
++ /* program opp dpg blank color */
++ color_space = COLOR_SPACE_SRGB;
++ color_space_to_black_color(dc, color_space, &black_color);
++
++ otg_active_width = width;
++ otg_active_height = height;
++
++ /* get the OPTC source */
++ tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
++ ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp);
++
++ for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
++ if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src0) {
++ opp = dc->res_pool->opps[i];
++ break;
++ }
++ }
++
++ if (opp && opp->funcs->opp_set_disp_pattern_generator)
++ opp->funcs->opp_set_disp_pattern_generator(
++ opp,
++ CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
++ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
++ COLOR_DEPTH_UNDEFINED,
++ &black_color,
++ otg_active_width,
++ otg_active_height,
++ 0);
++
++ if (tg->funcs->is_tg_enabled(tg))
++ hws->funcs.wait_for_blank_complete(opp);
++}
++
++bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc,
++ const struct dc_state *cur_ctx,
++ const struct dc_state *new_ctx)
++{
++ int i;
++ const struct pipe_ctx *cur_pipe, *new_pipe;
++ bool is_seamless = true;
++
++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
++ cur_pipe = &cur_ctx->res_ctx.pipe_ctx[i];
++ new_pipe = &new_ctx->res_ctx.pipe_ctx[i];
++
++ if (resource_is_pipe_type(cur_pipe, FREE_PIPE) ||
++ resource_is_pipe_type(new_pipe, FREE_PIPE))
++ /* adding or removing free pipes is always seamless */
++ continue;
++ else if (resource_is_pipe_type(cur_pipe, OTG_MASTER)) {
++ if (resource_is_pipe_type(new_pipe, OTG_MASTER))
++ if (cur_pipe->stream->stream_id == new_pipe->stream->stream_id)
++ /* OTG master with the same stream is seamless */
++ continue;
++ } else if (resource_is_pipe_type(cur_pipe, OPP_HEAD)) {
++ if (resource_is_pipe_type(new_pipe, OPP_HEAD)) {
++ if (cur_pipe->stream_res.tg == new_pipe->stream_res.tg)
++ /*
++ * OPP heads sharing the same timing
++ * generator is seamless
++ */
++ continue;
++ }
++ } else if (resource_is_pipe_type(cur_pipe, DPP_PIPE)) {
++ if (resource_is_pipe_type(new_pipe, DPP_PIPE)) {
++ if (cur_pipe->stream_res.opp == new_pipe->stream_res.opp)
++ /*
++ * DPP pipes sharing the same OPP head is
++ * seamless
++ */
++ continue;
++ }
++ }
++
++ /*
++ * This pipe's transition doesn't fall under any seamless
++ * conditions
++ */
++ is_seamless = false;
++ break;
++ }
++
++ return is_seamless;
++}
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
+index 2d2628f31bed7..9992e40acd217 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
+@@ -115,4 +115,13 @@ void dcn32_init_blank(
+ struct dc *dc,
+ struct timing_generator *tg);
+
++void dcn32_blank_phantom(struct dc *dc,
++ struct timing_generator *tg,
++ int width,
++ int height);
++
++bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc,
++ const struct dc_state *cur_ctx,
++ const struct dc_state *new_ctx);
++
+ #endif /* __DC_HWSS_DCN32_H__ */
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+index 777b2fac20c4e..12e0f48a13e48 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+@@ -115,6 +115,8 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
+ .update_phantom_vp_position = dcn32_update_phantom_vp_position,
+ .update_dsc_pg = dcn32_update_dsc_pg,
+ .apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom,
++ .blank_phantom = dcn32_blank_phantom,
++ .is_pipe_topology_transition_seamless = dcn32_is_pipe_topology_transition_seamless,
+ };
+
+ static const struct hwseq_private_funcs dcn32_private_funcs = {
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+index 02ff99f7bec2b..66e680902c95c 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+@@ -388,6 +388,11 @@ struct hw_sequencer_funcs {
+ void (*z10_restore)(const struct dc *dc);
+ void (*z10_save_init)(struct dc *dc);
+
++ void (*blank_phantom)(struct dc *dc,
++ struct timing_generator *tg,
++ int width,
++ int height);
++
+ void (*update_visual_confirm_color)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ int mpcc_id);
+@@ -396,6 +401,9 @@ struct hw_sequencer_funcs {
+ struct dc_state *context,
+ struct pipe_ctx *phantom_pipe);
+ void (*apply_update_flags_for_phantom)(struct pipe_ctx *phantom_pipe);
++ bool (*is_pipe_topology_transition_seamless)(struct dc *dc,
++ const struct dc_state *cur_ctx,
++ const struct dc_state *new_ctx);
+
+ void (*commit_subvp_config)(struct dc *dc, struct dc_state *context);
+ void (*enable_phantom_streams)(struct dc *dc, struct dc_state *context);
+diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+index 4585e0419da61..e2e8b35f3e26d 100644
+--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
++++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+@@ -186,6 +186,7 @@ struct dmub_srv_region_params {
+ uint32_t vbios_size;
+ const uint8_t *fw_inst_const;
+ const uint8_t *fw_bss_data;
++ bool is_mailbox_in_inbox;
+ };
+
+ /**
+@@ -205,20 +206,25 @@ struct dmub_srv_region_params {
+ */
+ struct dmub_srv_region_info {
+ uint32_t fb_size;
++ uint32_t inbox_size;
+ uint8_t num_regions;
+ struct dmub_region regions[DMUB_WINDOW_TOTAL];
+ };
+
+ /**
+- * struct dmub_srv_fb_params - parameters used for driver fb setup
++ * struct dmub_srv_memory_params - parameters used for driver fb setup
+ * @region_info: region info calculated by dmub service
+- * @cpu_addr: base cpu address for the framebuffer
+- * @gpu_addr: base gpu virtual address for the framebuffer
++ * @cpu_fb_addr: base cpu address for the framebuffer
++ * @cpu_inbox_addr: base cpu address for the inbox (in GART)
++ * @gpu_fb_addr: base gpu virtual address for the framebuffer
++ * @gpu_inbox_addr: base gpu virtual address for the inbox (in GART)
+ */
+-struct dmub_srv_fb_params {
++struct dmub_srv_memory_params {
+ const struct dmub_srv_region_info *region_info;
+- void *cpu_addr;
+- uint64_t gpu_addr;
++ void *cpu_fb_addr;
++ void *cpu_inbox_addr;
++ uint64_t gpu_fb_addr;
++ uint64_t gpu_inbox_addr;
+ };
+
+ /**
+@@ -545,8 +551,8 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+-enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
+- const struct dmub_srv_fb_params *params,
++enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
++ const struct dmub_srv_memory_params *params,
+ struct dmub_srv_fb_info *out);
+
+ /**
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+index bdaf43892f47b..13ee22e9a1f4a 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+@@ -385,7 +385,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
+ uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
+ uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
+ uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE;
+-
++ uint32_t previous_top = 0;
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+@@ -410,8 +410,15 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
+ bios->base = dmub_align(stack->top, 256);
+ bios->top = bios->base + params->vbios_size;
+
+- mail->base = dmub_align(bios->top, 256);
+- mail->top = mail->base + DMUB_MAILBOX_SIZE;
++ if (params->is_mailbox_in_inbox) {
++ mail->base = 0;
++ mail->top = mail->base + DMUB_MAILBOX_SIZE;
++ previous_top = bios->top;
++ } else {
++ mail->base = dmub_align(bios->top, 256);
++ mail->top = mail->base + DMUB_MAILBOX_SIZE;
++ previous_top = mail->top;
++ }
+
+ fw_info = dmub_get_fw_meta_info(params);
+
+@@ -430,7 +437,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
+ dmub->fw_version = fw_info->fw_version;
+ }
+
+- trace_buff->base = dmub_align(mail->top, 256);
++ trace_buff->base = dmub_align(previous_top, 256);
+ trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64);
+
+ fw_state->base = dmub_align(trace_buff->top, 256);
+@@ -441,11 +448,14 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
+
+ out->fb_size = dmub_align(scratch_mem->top, 4096);
+
++ if (params->is_mailbox_in_inbox)
++ out->inbox_size = dmub_align(mail->top, 4096);
++
+ return DMUB_STATUS_OK;
+ }
+
+-enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
+- const struct dmub_srv_fb_params *params,
++enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
++ const struct dmub_srv_memory_params *params,
+ struct dmub_srv_fb_info *out)
+ {
+ uint8_t *cpu_base;
+@@ -460,8 +470,8 @@ enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
+ if (params->region_info->num_regions != DMUB_NUM_WINDOWS)
+ return DMUB_STATUS_INVALID;
+
+- cpu_base = (uint8_t *)params->cpu_addr;
+- gpu_base = params->gpu_addr;
++ cpu_base = (uint8_t *)params->cpu_fb_addr;
++ gpu_base = params->gpu_fb_addr;
+
+ for (i = 0; i < DMUB_NUM_WINDOWS; ++i) {
+ const struct dmub_region *reg =
+@@ -469,6 +479,12 @@ enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
+
+ out->fb[i].cpu_addr = cpu_base + reg->base;
+ out->fb[i].gpu_addr = gpu_base + reg->base;
++
++ if (i == DMUB_WINDOW_4_MAILBOX && params->cpu_inbox_addr != 0) {
++ out->fb[i].cpu_addr = (uint8_t *)params->cpu_inbox_addr + reg->base;
++ out->fb[i].gpu_addr = params->gpu_inbox_addr + reg->base;
++ }
++
+ out->fb[i].size = reg->top - reg->base;
+ }
+
+@@ -657,9 +673,16 @@ enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) {
+- dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+- dmub->inbox1_rb.wrpt = dmub->hw_funcs.get_inbox1_wptr(dmub);
+- dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
++ uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
++ uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub);
++
++ if (rptr > dmub->inbox1_rb.capacity || wptr > dmub->inbox1_rb.capacity) {
++ return DMUB_STATUS_HW_FAILURE;
++ } else {
++ dmub->inbox1_rb.rptr = rptr;
++ dmub->inbox1_rb.wrpt = wptr;
++ dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
++ }
+ }
+
+ return DMUB_STATUS_OK;
+@@ -693,6 +716,11 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
+ if (!dmub->hw_init)
+ return DMUB_STATUS_INVALID;
+
++ if (dmub->inbox1_rb.rptr > dmub->inbox1_rb.capacity ||
++ dmub->inbox1_rb.wrpt > dmub->inbox1_rb.capacity) {
++ return DMUB_STATUS_HW_FAILURE;
++ }
++
+ if (dmub_rb_push_front(&dmub->inbox1_rb, cmd))
+ return DMUB_STATUS_OK;
+
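dmub_srv_sync_inbox1() and dmub_srv_cmd_queue() now refuse to trust ring pointers that lie beyond the ring's capacity, returning DMUB_STATUS_HW_FAILURE instead of indexing with a corrupted value read back from hardware. The check itself reduces to a bounds test:

	#include <stdbool.h>
	#include <stdint.h>

	/* Sketch: reject read/write pointers that point past the ring. */
	static bool rb_ptrs_valid(uint32_t rptr, uint32_t wptr, uint32_t capacity)
	{
		return rptr <= capacity && wptr <= capacity;
	}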
+diff --git a/drivers/gpu/drm/amd/include/pptable.h b/drivers/gpu/drm/amd/include/pptable.h
+index 0b6a057e0a4c4..5aac8d545bdc6 100644
+--- a/drivers/gpu/drm/amd/include/pptable.h
++++ b/drivers/gpu/drm/amd/include/pptable.h
+@@ -78,7 +78,7 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER
+ typedef struct _ATOM_PPLIB_STATE
+ {
+ UCHAR ucNonClockStateIndex;
+- UCHAR ucClockStateIndices[1]; // variable-sized
++ UCHAR ucClockStateIndices[]; // variable-sized
+ } ATOM_PPLIB_STATE;
+
+
+@@ -473,7 +473,7 @@ typedef struct _ATOM_PPLIB_STATE_V2
+ /**
+ * Driver will read the first ucNumDPMLevels in this array
+ */
+- UCHAR clockInfoIndex[1];
++ UCHAR clockInfoIndex[];
+ } ATOM_PPLIB_STATE_V2;
+
+ typedef struct _StateArray{
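Every trailing entries[1] in these pptable structs becomes a C99 flexible array member, so sizeof(struct) no longer counts a phantom first element and bounds-checking instrumentation (FORTIFY, UBSAN) can see the real extent. A hedged sketch of how such a table would be sized on allocation:

	#include <linux/overflow.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	struct example_table {
		u8 rev_id;
		u8 num_entries;
		u8 entries[];		/* flexible array member, not entries[1] */
	};

	static struct example_table *example_table_alloc(u8 n)
	{
		struct example_table *t;

		/* struct_size() adds the header and n elements with
		 * overflow checking. */
		t = kzalloc(struct_size(t, entries, n), GFP_KERNEL);
		if (t)
			t->num_entries = n;
		return t;
	}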
+diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+index 7f7a476b6829c..eb2c1d59bc6a7 100644
+--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+@@ -734,7 +734,7 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
+
+- if (count > 127)
++ if (count > 127 || count == 0)
+ return -EINVAL;
+
+ if (*buf == 's')
+@@ -754,7 +754,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
+ else
+ return -EINVAL;
+
+- memcpy(buf_cpy, buf, count+1);
++ memcpy(buf_cpy, buf, count);
++ buf_cpy[count] = 0;
+
+ tmp_str = buf_cpy;
+
+@@ -771,6 +772,9 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
+ return -EINVAL;
+ parameter_size++;
+
++ if (!tmp_str)
++ break;
++
+ while (isspace(*tmp_str))
+ tmp_str++;
+ }
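The amdgpu_set_pp_od_clk_voltage() fix closes two holes: a sysfs store buffer of count bytes is not guaranteed to be NUL-terminated, so copying count + 1 bytes read one past the end, and a zero-length write (or strsep() exhausting the string) left the parser walking past a NULL. The safe-copy idiom, sketched:

	#include <linux/errno.h>
	#include <linux/string.h>
	#include <linux/types.h>

	/* Sketch: bound the copy by both count and the local buffer, then
	 * NUL-terminate explicitly. */
	static int copy_store_buf(char *dst, size_t dst_len,
				  const char *buf, size_t count)
	{
		if (count == 0 || count >= dst_len)
			return -EINVAL;
		memcpy(dst, buf, count);
		dst[count] = '\0';
		return 0;
	}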
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
+index b0ac4d121adca..e0e40b054c08b 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
+@@ -164,7 +164,7 @@ typedef struct _ATOM_Tonga_State {
+ typedef struct _ATOM_Tonga_State_Array {
+ UCHAR ucRevId;
+ UCHAR ucNumEntries; /* Number of entries. */
+- ATOM_Tonga_State entries[1]; /* Dynamically allocate entries. */
++ ATOM_Tonga_State entries[]; /* Dynamically allocate entries. */
+ } ATOM_Tonga_State_Array;
+
+ typedef struct _ATOM_Tonga_MCLK_Dependency_Record {
+@@ -179,7 +179,7 @@ typedef struct _ATOM_Tonga_MCLK_Dependency_Record {
+ typedef struct _ATOM_Tonga_MCLK_Dependency_Table {
+ UCHAR ucRevId;
+ UCHAR ucNumEntries; /* Number of entries. */
+- ATOM_Tonga_MCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
++ ATOM_Tonga_MCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
+ } ATOM_Tonga_MCLK_Dependency_Table;
+
+ typedef struct _ATOM_Tonga_SCLK_Dependency_Record {
+@@ -194,7 +194,7 @@ typedef struct _ATOM_Tonga_SCLK_Dependency_Record {
+ typedef struct _ATOM_Tonga_SCLK_Dependency_Table {
+ UCHAR ucRevId;
+ UCHAR ucNumEntries; /* Number of entries. */
+- ATOM_Tonga_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
++ ATOM_Tonga_SCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
+ } ATOM_Tonga_SCLK_Dependency_Table;
+
+ typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
+@@ -210,7 +210,7 @@ typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
+ typedef struct _ATOM_Polaris_SCLK_Dependency_Table {
+ UCHAR ucRevId;
+ UCHAR ucNumEntries; /* Number of entries. */
+- ATOM_Polaris_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
++ ATOM_Polaris_SCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
+ } ATOM_Polaris_SCLK_Dependency_Table;
+
+ typedef struct _ATOM_Tonga_PCIE_Record {
+@@ -222,7 +222,7 @@ typedef struct _ATOM_Tonga_PCIE_Record {
+ typedef struct _ATOM_Tonga_PCIE_Table {
+ UCHAR ucRevId;
+ UCHAR ucNumEntries; /* Number of entries. */
+- ATOM_Tonga_PCIE_Record entries[1]; /* Dynamically allocate entries. */
++ ATOM_Tonga_PCIE_Record entries[]; /* Dynamically allocate entries. */
+ } ATOM_Tonga_PCIE_Table;
+
+ typedef struct _ATOM_Polaris10_PCIE_Record {
+@@ -235,7 +235,7 @@ typedef struct _ATOM_Polaris10_PCIE_Record {
+ typedef struct _ATOM_Polaris10_PCIE_Table {
+ UCHAR ucRevId;
+ UCHAR ucNumEntries; /* Number of entries. */
+- ATOM_Polaris10_PCIE_Record entries[1]; /* Dynamically allocate entries. */
++ ATOM_Polaris10_PCIE_Record entries[]; /* Dynamically allocate entries. */
+ } ATOM_Polaris10_PCIE_Table;
+
+
+@@ -252,7 +252,7 @@ typedef struct _ATOM_Tonga_MM_Dependency_Record {
+ typedef struct _ATOM_Tonga_MM_Dependency_Table {
+ UCHAR ucRevId;
+ UCHAR ucNumEntries; /* Number of entries. */
+- ATOM_Tonga_MM_Dependency_Record entries[1]; /* Dynamically allocate entries. */
++ ATOM_Tonga_MM_Dependency_Record entries[]; /* Dynamically allocate entries. */
+ } ATOM_Tonga_MM_Dependency_Table;
+
+ typedef struct _ATOM_Tonga_Voltage_Lookup_Record {
+@@ -265,7 +265,7 @@ typedef struct _ATOM_Tonga_Voltage_Lookup_Record {
+ typedef struct _ATOM_Tonga_Voltage_Lookup_Table {
+ UCHAR ucRevId;
+ UCHAR ucNumEntries; /* Number of entries. */
+- ATOM_Tonga_Voltage_Lookup_Record entries[1]; /* Dynamically allocate entries. */
++ ATOM_Tonga_Voltage_Lookup_Record entries[]; /* Dynamically allocate entries. */
+ } ATOM_Tonga_Voltage_Lookup_Table;
+
+ typedef struct _ATOM_Tonga_Fan_Table {
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+index 1cb4022644977..a38888176805d 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+@@ -1823,9 +1823,7 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+
+ data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
+ data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
+- data->pcie_dpm_key_disabled =
+- !amdgpu_device_pcie_dynamic_switching_supported() ||
+- !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
++ data->pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
+ /* need to set voltage control types before EVV patching */
+ data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
+ data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index 222af2fae7458..16c03771c1239 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -1232,7 +1232,7 @@ static int smu_smc_hw_setup(struct smu_context *smu)
+ {
+ struct smu_feature *feature = &smu->smu_feature;
+ struct amdgpu_device *adev = smu->adev;
+- uint32_t pcie_gen = 0, pcie_width = 0;
++ uint8_t pcie_gen = 0, pcie_width = 0;
+ uint64_t features_supported;
+ int ret = 0;
+
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+index 6e2069dcb6b9d..d1d7713b97794 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+@@ -856,7 +856,7 @@ struct pptable_funcs {
+ * &pcie_gen_cap: Maximum allowed PCIe generation.
+ * &pcie_width_cap: Maximum allowed PCIe width.
+ */
+- int (*update_pcie_parameters)(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap);
++ int (*update_pcie_parameters)(struct smu_context *smu, uint8_t pcie_gen_cap, uint8_t pcie_width_cap);
+
+ /**
+ * @i2c_init: Initialize i2c.
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+index 355c156d871af..cc02f979e9e98 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+@@ -296,8 +296,8 @@ int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
+ uint32_t pptable_id);
+
+ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
+- uint32_t pcie_gen_cap,
+- uint32_t pcie_width_cap);
++ uint8_t pcie_gen_cap,
++ uint8_t pcie_width_cap);
+
+ #endif
+ #endif
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+index 95f6d821bacbc..addaa69119b8e 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+@@ -2375,8 +2375,8 @@ static int navi10_get_power_limit(struct smu_context *smu,
+ }
+
+ static int navi10_update_pcie_parameters(struct smu_context *smu,
+- uint32_t pcie_gen_cap,
+- uint32_t pcie_width_cap)
++ uint8_t pcie_gen_cap,
++ uint8_t pcie_width_cap)
+ {
+ struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+index 9119b0df2419f..94f22df5ac205 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+@@ -2084,14 +2084,14 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
+ #define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
+- uint32_t pcie_gen_cap,
+- uint32_t pcie_width_cap)
++ uint8_t pcie_gen_cap,
++ uint8_t pcie_width_cap)
+ {
+ struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
+ uint8_t *table_member1, *table_member2;
+- uint32_t min_gen_speed, max_gen_speed;
+- uint32_t min_lane_width, max_lane_width;
++ uint8_t min_gen_speed, max_gen_speed;
++ uint8_t min_lane_width, max_lane_width;
+ uint32_t smu_pcie_arg;
+ int ret, i;
+
+@@ -2107,7 +2107,7 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
+ min_lane_width = min_lane_width > max_lane_width ?
+ max_lane_width : min_lane_width;
+
+- if (!amdgpu_device_pcie_dynamic_switching_supported()) {
++ if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
+ pcie_table->pcie_gen[0] = max_gen_speed;
+ pcie_table->pcie_lane[0] = max_lane_width;
+ } else {
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+index 9b62b45ebb7f0..3bc60ecc7bfef 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+@@ -2426,8 +2426,8 @@ int smu_v13_0_mode1_reset(struct smu_context *smu)
+ }
+
+ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
+- uint32_t pcie_gen_cap,
+- uint32_t pcie_width_cap)
++ uint8_t pcie_gen_cap,
++ uint8_t pcie_width_cap)
+ {
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_13_0_pcie_table *pcie_table =
+@@ -2436,7 +2436,10 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
+ uint32_t smu_pcie_arg;
+ int ret, i;
+
+- if (!amdgpu_device_pcie_dynamic_switching_supported()) {
++ if (!num_of_levels)
++ return 0;
++
++ if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
+ if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
+ pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
+
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+index 0fb6be11a0cc7..41783c0243006 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+@@ -343,12 +343,12 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
+ if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_HARDWAREDC)
+ smu->dc_controlled_by_gpio = true;
+
+- if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_BACO ||
+- powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
++ if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_BACO) {
+ smu_baco->platform_support = true;
+
+- if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
+- smu_baco->maco_support = true;
++ if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
++ smu_baco->maco_support = true;
++ }
+
+ /*
+ * We are in the transition to a new OD mechanism.
+@@ -2162,38 +2162,10 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
+ }
+ }
+
+- if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE &&
+- (((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xC8)) ||
+- ((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xCC)))) {
+- ret = smu_cmn_update_table(smu,
+- SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+- WORKLOAD_PPLIB_COMPUTE_BIT,
+- (void *)(&activity_monitor_external),
+- false);
+- if (ret) {
+- dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+- return ret;
+- }
+-
+- ret = smu_cmn_update_table(smu,
+- SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+- WORKLOAD_PPLIB_CUSTOM_BIT,
+- (void *)(&activity_monitor_external),
+- true);
+- if (ret) {
+- dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+- return ret;
+- }
+-
+- workload_type = smu_cmn_to_asic_specific_index(smu,
+- CMN2ASIC_MAPPING_WORKLOAD,
+- PP_SMC_POWER_PROFILE_CUSTOM);
+- } else {
+- /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+- workload_type = smu_cmn_to_asic_specific_index(smu,
++ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
++ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ smu->power_profile_mode);
+- }
+
+ if (workload_type < 0)
+ return -EINVAL;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+index 62f2886ab4df6..8cc16b3d18a3a 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+@@ -333,12 +333,13 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu)
+ if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_HARDWAREDC)
+ smu->dc_controlled_by_gpio = true;
+
+- if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_BACO ||
+- powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_MACO)
++ if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_BACO) {
+ smu_baco->platform_support = true;
+
+- if (smu_baco->platform_support && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled))
+- smu_baco->maco_support = true;
++ if ((powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_MACO)
++ && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled))
++ smu_baco->maco_support = true;
++ }
+
+ #if 0
+ if (!overdrive_lowerlimits->FeatureCtrlMask ||
+diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+index 3276a3e82c628..916f2c36bf2f7 100644
+--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
++++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+@@ -1223,7 +1223,7 @@ int komeda_build_display_data_flow(struct komeda_crtc *kcrtc,
+ return 0;
+ }
+
+-static void
++static int
+ komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
+ struct komeda_pipeline_state *new)
+ {
+@@ -1243,8 +1243,12 @@ komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
+ c = komeda_pipeline_get_component(pipe, id);
+ c_st = komeda_component_get_state_and_set_user(c,
+ drm_st, NULL, new->crtc);
++ if (PTR_ERR(c_st) == -EDEADLK)
++ return -EDEADLK;
+ WARN_ON(IS_ERR(c_st));
+ }
++
++ return 0;
+ }
+
+ /* release unclaimed pipeline resource */
+@@ -1266,9 +1270,8 @@ int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
+ if (WARN_ON(IS_ERR_OR_NULL(st)))
+ return -EINVAL;
+
+- komeda_pipeline_unbound_components(pipe, st);
++ return komeda_pipeline_unbound_components(pipe, st);
+
+- return 0;
+ }
+
+ /* Since standalone disabled components must be disabled separately and in the
+diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
+index fc7f5ec5fb381..8f5846b76d594 100644
+--- a/drivers/gpu/drm/bridge/ite-it66121.c
++++ b/drivers/gpu/drm/bridge/ite-it66121.c
+@@ -884,14 +884,14 @@ static struct edid *it66121_bridge_get_edid(struct drm_bridge *bridge,
+ mutex_lock(&ctx->lock);
+ ret = it66121_preamble_ddc(ctx);
+ if (ret) {
+- edid = ERR_PTR(ret);
++ edid = NULL;
+ goto out_unlock;
+ }
+
+ ret = regmap_write(ctx->regmap, IT66121_DDC_HEADER_REG,
+ IT66121_DDC_HEADER_EDID);
+ if (ret) {
+- edid = ERR_PTR(ret);
++ edid = NULL;
+ goto out_unlock;
+ }
+
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 69d855123d3e3..f1ceb7d08519e 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -3499,11 +3499,19 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
+ mode->vsync_end = mode->vsync_start + vsync_pulse_width;
+ mode->vtotal = mode->vdisplay + vblank;
+
+- /* Some EDIDs have bogus h/vtotal values */
+- if (mode->hsync_end > mode->htotal)
+- mode->htotal = mode->hsync_end + 1;
+- if (mode->vsync_end > mode->vtotal)
+- mode->vtotal = mode->vsync_end + 1;
++ /* Some EDIDs have bogus h/vsync_end values */
++ if (mode->hsync_end > mode->htotal) {
++ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] reducing hsync_end %d->%d\n",
++ connector->base.id, connector->name,
++ mode->hsync_end, mode->htotal);
++ mode->hsync_end = mode->htotal;
++ }
++ if (mode->vsync_end > mode->vtotal) {
++ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] reducing vsync_end %d->%d\n",
++ connector->base.id, connector->name,
++ mode->vsync_end, mode->vtotal);
++ mode->vsync_end = mode->vtotal;
++ }
+
+ drm_mode_do_interlace_quirk(mode, pt);
+
+diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
+index 150fe15550680..94375c6a54256 100644
+--- a/drivers/gpu/drm/drm_lease.c
++++ b/drivers/gpu/drm/drm_lease.c
+@@ -510,8 +510,8 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
+ /* Handle leased objects, if any */
+ idr_init(&leases);
+ if (object_count != 0) {
+- object_ids = memdup_user(u64_to_user_ptr(cl->object_ids),
+- array_size(object_count, sizeof(__u32)));
++ object_ids = memdup_array_user(u64_to_user_ptr(cl->object_ids),
++ object_count, sizeof(__u32));
+ if (IS_ERR(object_ids)) {
+ ret = PTR_ERR(object_ids);
+ idr_destroy(&leases);
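memdup_array_user() folds the array_size() overflow check into the copy itself, so a huge object_count yields an error rather than an undersized allocation from a wrapped multiplication. The call shape, sketched:

	#include <linux/err.h>
	#include <linux/string.h>
	#include <linux/types.h>

	/* Sketch: duplicate a user-supplied array of u32 ids; the
	 * element-count multiplication is overflow-checked internally. */
	static u32 *copy_object_ids(const void __user *uptr, u32 object_count)
	{
		/* returns ERR_PTR(-EOVERFLOW/-EFAULT/-ENOMEM) on failure */
		return memdup_array_user(uptr, object_count, sizeof(u32));
	}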
+diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
+index f7f709df99b49..70d9adafa2333 100644
+--- a/drivers/gpu/drm/gma500/psb_drv.h
++++ b/drivers/gpu/drm/gma500/psb_drv.h
+@@ -424,6 +424,7 @@ struct drm_psb_private {
+ uint32_t pipestat[PSB_NUM_PIPE];
+
+ spinlock_t irqmask_lock;
++ bool irq_enabled;
+
+ /* Power */
+ bool pm_initialized;
+diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
+index 343c51250207d..7bbb79b0497d8 100644
+--- a/drivers/gpu/drm/gma500/psb_irq.c
++++ b/drivers/gpu/drm/gma500/psb_irq.c
+@@ -327,6 +327,8 @@ int gma_irq_install(struct drm_device *dev)
+
+ gma_irq_postinstall(dev);
+
++ dev_priv->irq_enabled = true;
++
+ return 0;
+ }
+
+@@ -337,6 +339,9 @@ void gma_irq_uninstall(struct drm_device *dev)
+ unsigned long irqflags;
+ unsigned int i;
+
++ if (!dev_priv->irq_enabled)
++ return;
++
+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+ if (dev_priv->ops->hotplug_enable)
+diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
+index 4207863b7b2ae..4bba2f536b421 100644
+--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
++++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
+@@ -2680,6 +2680,18 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
+ for_each_pipe(dev_priv, pipe)
+ min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
+
++ /*
++ * Avoid glk_force_audio_cdclk() causing excessive screen
++ * blinking when multiple pipes are active by making sure
++ * CDCLK frequency is always high enough for audio. With a
++ * single active pipe we can always change CDCLK frequency
++ * by changing the cd2x divider (see glk_cdclk_table[]) and
++ * thus a full modeset won't be needed then.
++ */
++ if (IS_GEMINILAKE(dev_priv) && cdclk_state->active_pipes &&
++ !is_power_of_2(cdclk_state->active_pipes))
++ min_cdclk = max(2 * 96000, min_cdclk);
++
+ if (min_cdclk > dev_priv->display.cdclk.max_cdclk_freq) {
+ drm_dbg_kms(&dev_priv->drm,
+ "required cdclk (%d kHz) exceeds max (%d kHz)\n",
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index cb55112d60657..ec28354efc1c3 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -430,7 +430,7 @@ static int mtl_max_source_rate(struct intel_dp *intel_dp)
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+
+ if (intel_is_c10phy(i915, phy))
+- return intel_dp_is_edp(intel_dp) ? 675000 : 810000;
++ return 810000;
+
+ return 2000000;
+ }
+diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
+index 3ebf41859043e..cdf2455440bea 100644
+--- a/drivers/gpu/drm/i915/display/intel_tc.c
++++ b/drivers/gpu/drm/i915/display/intel_tc.c
+@@ -58,7 +58,7 @@ struct intel_tc_port {
+ struct delayed_work link_reset_work;
+ int link_refcount;
+ bool legacy_port:1;
+- char port_name[8];
++ const char *port_name;
+ enum tc_port_mode mode;
+ enum tc_port_mode init_mode;
+ enum phy_fia phy_fia;
+@@ -1841,8 +1841,12 @@ int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
+ else
+ tc->phy_ops = &icl_tc_phy_ops;
+
+- snprintf(tc->port_name, sizeof(tc->port_name),
+- "%c/TC#%d", port_name(port), tc_port + 1);
++ tc->port_name = kasprintf(GFP_KERNEL, "%c/TC#%d", port_name(port),
++ tc_port + 1);
++ if (!tc->port_name) {
++ kfree(tc);
++ return -ENOMEM;
++ }
+
+ mutex_init(&tc->lock);
+ /* TODO: Combine the two works */
+@@ -1863,6 +1867,7 @@ void intel_tc_port_cleanup(struct intel_digital_port *dig_port)
+ {
+ intel_tc_port_suspend(dig_port);
+
++ kfree(dig_port->tc->port_name);
+ kfree(dig_port->tc);
+ dig_port->tc = NULL;
+ }
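Switching the TC port name from a fixed char[8] to kasprintf() sizes the string exactly, presumably to sidestep format-truncation warnings for larger port/TC numbers; the cleanup path gains the matching kfree(). A sketch of the allocation side:

	#include <linux/kernel.h>
	#include <linux/slab.h>

	/* Sketch: build an exactly-sized, heap-allocated name; the caller
	 * owns it and must kfree() it on teardown. */
	static const char *make_port_name(char port, int tc_port)
	{
		return kasprintf(GFP_KERNEL, "%c/TC#%d", port, tc_port + 1);
	}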
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
+index 9a9ff84c90d7e..e38f06a6e56eb 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
+@@ -844,6 +844,7 @@ static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
+ if (idx >= pc->num_user_engines)
+ return -EINVAL;
+
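++ /* Clamp idx under speculation so the bounds check above cannot be bypassed (Spectre v1). */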
++ idx = array_index_nospec(idx, pc->num_user_engines);
+ pe = &pc->user_engines[idx];
+
+ /* Only render engine supports RPCS configuration. */
+diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
+index da21f2786b5d7..b20d8fe8aa95d 100644
+--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
++++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
+@@ -190,6 +190,21 @@ void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
+ spin_unlock_irq(&uncore->lock);
+ }
+
++static bool needs_wc_ggtt_mapping(struct drm_i915_private *i915)
++{
++ /*
++ * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
++ * will be dropped. For WC mappings in general we have 64 byte burst
++ * writes when the WC buffer is flushed, so we can't use it, but have to
++ * resort to an uncached mapping. The WC issue is easily caught by the
++ * readback check when writing GTT PTE entries.
++ */
++ if (!IS_GEN9_LP(i915) && GRAPHICS_VER(i915) < 11)
++ return true;
++
++ return false;
++}
++
+ static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
+ {
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
+@@ -197,8 +212,12 @@ static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
+ /*
+ * Note that as an uncached mmio write, this will flush the
+ * WCB of the writes into the GGTT before it triggers the invalidate.
++ *
++ * Only perform this when GGTT is mapped as WC, see ggtt_probe_common().
+ */
+- intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
++ if (needs_wc_ggtt_mapping(ggtt->vm.i915))
++ intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6,
++ GFX_FLSH_CNTL_EN);
+ }
+
+ static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
+@@ -902,17 +921,11 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
+ GEM_WARN_ON(pci_resource_len(pdev, GEN4_GTTMMADR_BAR) != gen6_gttmmadr_size(i915));
+ phys_addr = pci_resource_start(pdev, GEN4_GTTMMADR_BAR) + gen6_gttadr_offset(i915);
+
+- /*
+- * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
+- * will be dropped. For WC mappings in general we have 64 byte burst
+- * writes when the WC buffer is flushed, so we can't use it, but have to
+- * resort to an uncached mapping. The WC issue is easily caught by the
+- * readback check when writing GTT PTE entries.
+- */
+- if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 11)
+- ggtt->gsm = ioremap(phys_addr, size);
+- else
++ if (needs_wc_ggtt_mapping(i915))
+ ggtt->gsm = ioremap_wc(phys_addr, size);
++ else
++ ggtt->gsm = ioremap(phys_addr, size);
++
+ if (!ggtt->gsm) {
+ drm_err(&i915->drm, "Failed to map the ggtt page table\n");
+ return -ENOMEM;
+diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
+index 58bb1c55294c9..ccdc1afbf11b5 100644
+--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
++++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
+@@ -584,19 +584,23 @@ static void __intel_rc6_disable(struct intel_rc6 *rc6)
+
+ static void rc6_res_reg_init(struct intel_rc6 *rc6)
+ {
+- memset(rc6->res_reg, INVALID_MMIO_REG.reg, sizeof(rc6->res_reg));
++ i915_reg_t res_reg[INTEL_RC6_RES_MAX] = {
++ [0 ... INTEL_RC6_RES_MAX - 1] = INVALID_MMIO_REG,
++ };
+
+ switch (rc6_to_gt(rc6)->type) {
+ case GT_MEDIA:
+- rc6->res_reg[INTEL_RC6_RES_RC6] = MTL_MEDIA_MC6;
++ res_reg[INTEL_RC6_RES_RC6] = MTL_MEDIA_MC6;
+ break;
+ default:
+- rc6->res_reg[INTEL_RC6_RES_RC6_LOCKED] = GEN6_GT_GFX_RC6_LOCKED;
+- rc6->res_reg[INTEL_RC6_RES_RC6] = GEN6_GT_GFX_RC6;
+- rc6->res_reg[INTEL_RC6_RES_RC6p] = GEN6_GT_GFX_RC6p;
+- rc6->res_reg[INTEL_RC6_RES_RC6pp] = GEN6_GT_GFX_RC6pp;
++ res_reg[INTEL_RC6_RES_RC6_LOCKED] = GEN6_GT_GFX_RC6_LOCKED;
++ res_reg[INTEL_RC6_RES_RC6] = GEN6_GT_GFX_RC6;
++ res_reg[INTEL_RC6_RES_RC6p] = GEN6_GT_GFX_RC6p;
++ res_reg[INTEL_RC6_RES_RC6pp] = GEN6_GT_GFX_RC6pp;
+ break;
+ }
++
++ memcpy(rc6->res_reg, res_reg, sizeof(res_reg));
+ }
+
+ void intel_rc6_init(struct intel_rc6 *rc6)
+diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
+index 331685e1b7b7d..da5b0fac745b4 100644
+--- a/drivers/gpu/drm/i915/i915_perf.c
++++ b/drivers/gpu/drm/i915/i915_perf.c
+@@ -4286,11 +4286,8 @@ int i915_perf_open_ioctl(struct drm_device *dev, void *data,
+ u32 known_open_flags;
+ int ret;
+
+- if (!perf->i915) {
+- drm_dbg(&perf->i915->drm,
+- "i915 perf interface not available for this system\n");
++ if (!perf->i915)
+ return -ENOTSUPP;
+- }
+
+ known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
+ I915_PERF_FLAG_FD_NONBLOCK |
+@@ -4666,11 +4663,8 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
+ struct i915_oa_reg *regs;
+ int err, id;
+
+- if (!perf->i915) {
+- drm_dbg(&perf->i915->drm,
+- "i915 perf interface not available for this system\n");
++ if (!perf->i915)
+ return -ENOTSUPP;
+- }
+
+ if (!perf->metrics_kobj) {
+ drm_dbg(&perf->i915->drm,
+@@ -4832,11 +4826,8 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
+ struct i915_oa_config *oa_config;
+ int ret;
+
+- if (!perf->i915) {
+- drm_dbg(&perf->i915->drm,
+- "i915 perf interface not available for this system\n");
++ if (!perf->i915)
+ return -ENOTSUPP;
+- }
+
+ if (i915_perf_stream_paranoid && !perfmon_capable()) {
+ drm_dbg(&perf->i915->drm,
+diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
+index 076aa54910571..bd6ace487c048 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dp.c
++++ b/drivers/gpu/drm/mediatek/mtk_dp.c
+@@ -1983,7 +1983,6 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
+ bool enabled = mtk_dp->enabled;
+ struct edid *new_edid = NULL;
+ struct mtk_dp_audio_cfg *audio_caps = &mtk_dp->info.audio_cur_cfg;
+- struct cea_sad *sads;
+
+ if (!enabled) {
+ drm_atomic_bridge_chain_pre_enable(bridge, connector->state->state);
+@@ -2006,11 +2005,16 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
+ */
+ if (mtk_dp_parse_capabilities(mtk_dp)) {
+ drm_err(mtk_dp->drm_dev, "Can't parse capabilities\n");
++ kfree(new_edid);
+ new_edid = NULL;
+ }
+
+ if (new_edid) {
++ struct cea_sad *sads;
++
+ audio_caps->sad_count = drm_edid_to_sad(new_edid, &sads);
++ kfree(sads);
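++ /* Only the SAD count is needed here; free the array right away. */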
++
+ audio_caps->detect_monitor = drm_detect_monitor_audio(new_edid);
+ }
+
+diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
+index 42d52510ffd4a..86a8e06c7a60f 100644
+--- a/drivers/gpu/drm/msm/dp/dp_panel.c
++++ b/drivers/gpu/drm/msm/dp/dp_panel.c
+@@ -289,26 +289,9 @@ int dp_panel_get_modes(struct dp_panel *dp_panel,
+
+ static u8 dp_panel_get_edid_checksum(struct edid *edid)
+ {
+- struct edid *last_block;
+- u8 *raw_edid;
+- bool is_edid_corrupt = false;
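++ /* Point at the last EDID block; its checksum byte is what we report. */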
++ edid += edid->extensions;
+
+- if (!edid) {
+- DRM_ERROR("invalid edid input\n");
+- return 0;
+- }
+-
+- raw_edid = (u8 *)edid;
+- raw_edid += (edid->extensions * EDID_LENGTH);
+- last_block = (struct edid *)raw_edid;
+-
+- /* block type extension */
+- drm_edid_block_valid(raw_edid, 1, false, &is_edid_corrupt);
+- if (!is_edid_corrupt)
+- return last_block->checksum;
+-
+- DRM_ERROR("Invalid block, no checksum\n");
+- return 0;
++ return edid->checksum;
+ }
+
+ void dp_panel_handle_sink_request(struct dp_panel *dp_panel)
+diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c
+index abb0788843c60..503ecea72c5ea 100644
+--- a/drivers/gpu/drm/panel/panel-arm-versatile.c
++++ b/drivers/gpu/drm/panel/panel-arm-versatile.c
+@@ -267,6 +267,8 @@ static int versatile_panel_get_modes(struct drm_panel *panel,
+ connector->display_info.bus_flags = vpanel->panel_type->bus_flags;
+
+ mode = drm_mode_duplicate(connector->dev, &vpanel->panel_type->mode);
++ if (!mode)
++ return -ENOMEM;
+ drm_mode_set_name(mode);
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+
+diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+index 3aa31f3d61574..687749bbec62c 100644
+--- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c
++++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+@@ -506,29 +506,30 @@ static int st7703_prepare(struct drm_panel *panel)
+ return 0;
+
+ dev_dbg(ctx->dev, "Resetting the panel\n");
+- ret = regulator_enable(ctx->vcc);
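++ /* Assert reset before powering up so the panel starts from a known state. */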
++ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
++
++ ret = regulator_enable(ctx->iovcc);
+ if (ret < 0) {
+- dev_err(ctx->dev, "Failed to enable vcc supply: %d\n", ret);
++ dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret);
+ return ret;
+ }
+- ret = regulator_enable(ctx->iovcc);
++
++ ret = regulator_enable(ctx->vcc);
+ if (ret < 0) {
+- dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret);
+- goto disable_vcc;
++ dev_err(ctx->dev, "Failed to enable vcc supply: %d\n", ret);
++ regulator_disable(ctx->iovcc);
++ return ret;
+ }
+
+- gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+- usleep_range(20, 40);
++ /* Give power supplies time to stabilize before deasserting reset. */
++ usleep_range(10000, 20000);
++
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+- msleep(20);
++ usleep_range(15000, 20000);
+
+ ctx->prepared = true;
+
+ return 0;
+-
+-disable_vcc:
+- regulator_disable(ctx->vcc);
+- return ret;
+ }
+
+ static const u32 mantix_bus_formats[] = {
+diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
+index 845304435e235..f6a212e542cb9 100644
+--- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c
++++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
+@@ -379,6 +379,8 @@ static int tpg110_get_modes(struct drm_panel *panel,
+ connector->display_info.bus_flags = tpg->panel_mode->bus_flags;
+
+ mode = drm_mode_duplicate(connector->dev, &tpg->panel_mode->mode);
++ if (!mode)
++ return -ENOMEM;
+ drm_mode_set_name(mode);
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+
+diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
+index 6492a70e3c396..404b0483bb7cb 100644
+--- a/drivers/gpu/drm/qxl/qxl_display.c
++++ b/drivers/gpu/drm/qxl/qxl_display.c
+@@ -1229,6 +1229,9 @@ int qxl_destroy_monitors_object(struct qxl_device *qdev)
+ if (!qdev->monitors_config_bo)
+ return 0;
+
++ kfree(qdev->dumb_heads);
++ qdev->dumb_heads = NULL;
++
+ qdev->monitors_config = NULL;
+ qdev->ram_header->monitors_config = 0;
+
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index 07193cd0c4174..4859d965d67e3 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -1122,6 +1122,8 @@ static int radeon_tv_get_modes(struct drm_connector *connector)
+ else {
+ /* only 800x600 is supported right now on pre-avivo chips */
+ tv_mode = drm_cvt_mode(dev, 800, 600, 60, false, false, false);
++ if (!tv_mode)
++ return 0;
+ tv_mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, tv_mode);
+ }
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+index 3829be282ff00..17463aeeef28f 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+@@ -774,9 +774,9 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+ sizeof(metadata->mip_levels));
+ metadata->num_sizes = num_sizes;
+ metadata->sizes =
+- memdup_user((struct drm_vmw_size __user *)(unsigned long)
++ memdup_array_user((struct drm_vmw_size __user *)(unsigned long)
+ req->size_addr,
+- sizeof(*metadata->sizes) * metadata->num_sizes);
++ metadata->num_sizes, sizeof(*metadata->sizes));
+ if (IS_ERR(metadata->sizes)) {
+ ret = PTR_ERR(metadata->sizes);
+ goto out_no_sizes;
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index cc0d0186a0d95..fafc40ecfd200 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -366,6 +366,7 @@
+
+ #define USB_VENDOR_ID_DELL 0x413c
+ #define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE 0x301a
++#define USB_DEVICE_ID_DELL_PRO_WIRELESS_KM5221W 0x4503
+
+ #define USB_VENDOR_ID_DELORME 0x1163
+ #define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100
+diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
+index 44763c0da4441..7c1b33be9d134 100644
+--- a/drivers/hid/hid-lenovo.c
++++ b/drivers/hid/hid-lenovo.c
+@@ -51,7 +51,12 @@ struct lenovo_drvdata {
+ int select_right;
+ int sensitivity;
+ int press_speed;
+- u8 middlebutton_state; /* 0:Up, 1:Down (undecided), 2:Scrolling */
++ /* 0: Up
++ * 1: Down (undecided)
++ * 2: Scrolling
++ * 3: Patched firmware, disable workaround
++ */
++ u8 middlebutton_state;
+ bool fn_lock;
+ };
+
+@@ -521,6 +526,19 @@ static void lenovo_features_set_cptkbd(struct hid_device *hdev)
+ int ret;
+ struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
+
++ /*
++ * Tell the keyboard a driver understands it, and turn F7, F9, F11 into
++ * regular keys
++ */
++ ret = lenovo_send_cmd_cptkbd(hdev, 0x01, 0x03);
++ if (ret)
++ hid_warn(hdev, "Failed to switch F7/9/11 mode: %d\n", ret);
++
++ /* Switch middle button to native mode */
++ ret = lenovo_send_cmd_cptkbd(hdev, 0x09, 0x01);
++ if (ret)
++ hid_warn(hdev, "Failed to switch middle button: %d\n", ret);
++
+ ret = lenovo_send_cmd_cptkbd(hdev, 0x05, cptkbd_data->fn_lock);
+ if (ret)
+ hid_err(hdev, "Fn-lock setting failed: %d\n", ret);
+@@ -668,31 +686,48 @@ static int lenovo_event_cptkbd(struct hid_device *hdev,
+ {
+ struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
+
+- /* "wheel" scroll events */
+- if (usage->type == EV_REL && (usage->code == REL_WHEEL ||
+- usage->code == REL_HWHEEL)) {
+- /* Scroll events disable middle-click event */
+- cptkbd_data->middlebutton_state = 2;
+- return 0;
+- }
++ if (cptkbd_data->middlebutton_state != 3) {
++ /* REL_X and REL_Y events while the middle button is pressed
++ * are only possible on patched, bug-free firmware,
++ * so set middlebutton_state to 3
++ * and never apply the workaround again.
++ */
++ if (cptkbd_data->middlebutton_state == 1 &&
++ usage->type == EV_REL &&
++ (usage->code == REL_X || usage->code == REL_Y)) {
++ cptkbd_data->middlebutton_state = 3;
++ /* send the middle button press that was held back earlier */
++ input_event(field->hidinput->input,
++ EV_KEY, BTN_MIDDLE, 1);
++ input_sync(field->hidinput->input);
++ }
++
++ /* "wheel" scroll events */
++ if (usage->type == EV_REL && (usage->code == REL_WHEEL ||
++ usage->code == REL_HWHEEL)) {
++ /* Scroll events disable middle-click event */
++ cptkbd_data->middlebutton_state = 2;
++ return 0;
++ }
+
+- /* Middle click events */
+- if (usage->type == EV_KEY && usage->code == BTN_MIDDLE) {
+- if (value == 1) {
+- cptkbd_data->middlebutton_state = 1;
+- } else if (value == 0) {
+- if (cptkbd_data->middlebutton_state == 1) {
+- /* No scrolling inbetween, send middle-click */
+- input_event(field->hidinput->input,
+- EV_KEY, BTN_MIDDLE, 1);
+- input_sync(field->hidinput->input);
+- input_event(field->hidinput->input,
+- EV_KEY, BTN_MIDDLE, 0);
+- input_sync(field->hidinput->input);
++ /* Middle click events */
++ if (usage->type == EV_KEY && usage->code == BTN_MIDDLE) {
++ if (value == 1) {
++ cptkbd_data->middlebutton_state = 1;
++ } else if (value == 0) {
++ if (cptkbd_data->middlebutton_state == 1) {
++ /* No scrolling in between, send middle-click */
++ input_event(field->hidinput->input,
++ EV_KEY, BTN_MIDDLE, 1);
++ input_sync(field->hidinput->input);
++ input_event(field->hidinput->input,
++ EV_KEY, BTN_MIDDLE, 0);
++ input_sync(field->hidinput->input);
++ }
++ cptkbd_data->middlebutton_state = 0;
+ }
+- cptkbd_data->middlebutton_state = 0;
++ return 1;
+ }
+- return 1;
+ }
+
+ if (usage->type == EV_KEY && usage->code == KEY_FN_ESC && value == 1) {
+@@ -1126,22 +1161,6 @@ static int lenovo_probe_cptkbd(struct hid_device *hdev)
+ }
+ hid_set_drvdata(hdev, cptkbd_data);
+
+- /*
+- * Tell the keyboard a driver understands it, and turn F7, F9, F11 into
+- * regular keys (Compact only)
+- */
+- if (hdev->product == USB_DEVICE_ID_LENOVO_CUSBKBD ||
+- hdev->product == USB_DEVICE_ID_LENOVO_CBTKBD) {
+- ret = lenovo_send_cmd_cptkbd(hdev, 0x01, 0x03);
+- if (ret)
+- hid_warn(hdev, "Failed to switch F7/9/11 mode: %d\n", ret);
+- }
+-
+- /* Switch middle button to native mode */
+- ret = lenovo_send_cmd_cptkbd(hdev, 0x09, 0x01);
+- if (ret)
+- hid_warn(hdev, "Failed to switch middle button: %d\n", ret);
+-
+ /* Set keyboard settings to known state */
+ cptkbd_data->middlebutton_state = 0;
+ cptkbd_data->fn_lock = true;
+@@ -1264,6 +1283,24 @@ err:
+ return ret;
+ }
+
++#ifdef CONFIG_PM
++static int lenovo_reset_resume(struct hid_device *hdev)
++{
++ switch (hdev->product) {
++ case USB_DEVICE_ID_LENOVO_CUSBKBD:
++ case USB_DEVICE_ID_LENOVO_TPIIUSBKBD:
++ if (hdev->type == HID_TYPE_USBMOUSE)
++ lenovo_features_set_cptkbd(hdev);
++
++ break;
++ default:
++ break;
++ }
++
++ return 0;
++}
++#endif
++
+ static void lenovo_remove_tpkbd(struct hid_device *hdev)
+ {
+ struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
+@@ -1380,6 +1417,9 @@ static struct hid_driver lenovo_driver = {
+ .raw_event = lenovo_raw_event,
+ .event = lenovo_event,
+ .report_fixup = lenovo_report_fixup,
++#ifdef CONFIG_PM
++ .reset_resume = lenovo_reset_resume,
++#endif
+ };
+ module_hid_driver(lenovo_driver);
+
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index 3983b4f282f8f..5a48fcaa32f00 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -66,6 +66,7 @@ static const struct hid_device_id hid_quirks[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
++ { HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PRO_WIRELESS_KM5221W), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES), HID_QUIRK_MULTI_INPUT },
+diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
+index 9cfe8fc509d7d..c91d4ea35c9b8 100644
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -158,6 +158,7 @@ config I2C_I801
+ Alder Lake (PCH)
+ Raptor Lake (PCH)
+ Meteor Lake (SOC and PCH)
++ Birch Stream (SOC)
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-i801.
+diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
+index 24bef0025c988..cebb39a1f15e6 100644
+--- a/drivers/i2c/busses/i2c-designware-master.c
++++ b/drivers/i2c/busses/i2c-designware-master.c
+@@ -518,10 +518,16 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
+
+ /*
+ * Because we don't know the buffer length in the
+- * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop
+- * the transaction here.
++ * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop the
++ * transaction here. Also disable the TX_EMPTY IRQ
++ * while waiting for the data length byte to avoid a
++ * flood of bogus interrupts.
+ */
+- if (buf_len > 0 || flags & I2C_M_RECV_LEN) {
++ if (flags & I2C_M_RECV_LEN) {
++ dev->status |= STATUS_WRITE_IN_PROGRESS;
++ intr_mask &= ~DW_IC_INTR_TX_EMPTY;
++ break;
++ } else if (buf_len > 0) {
+ /* more bytes to be written */
+ dev->status |= STATUS_WRITE_IN_PROGRESS;
+ break;
+@@ -557,6 +563,13 @@ i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
+ msgs[dev->msg_read_idx].len = len;
+ msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN;
+
++ /*
++ * The buffer length has been received, so re-enable the
++ * TX_EMPTY interrupt to resume the SMBus transaction.
++ */
++ regmap_update_bits(dev->map, DW_IC_INTR_MASK, DW_IC_INTR_TX_EMPTY,
++ DW_IC_INTR_TX_EMPTY);
++
+ return len;
+ }
+
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index 2a3215ac01b3a..7d78df30fe132 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -79,6 +79,7 @@
+ * Meteor Lake-P (SOC) 0x7e22 32 hard yes yes yes
+ * Meteor Lake SoC-S (SOC) 0xae22 32 hard yes yes yes
+ * Meteor Lake PCH-S (PCH) 0x7f23 32 hard yes yes yes
++ * Birch Stream (SOC) 0x5796 32 hard yes yes yes
+ *
+ * Features supported by this driver:
+ * Software PEC no
+@@ -231,6 +232,7 @@
+ #define PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS 0x4da3
+ #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS 0x51a3
+ #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS 0x54a3
++#define PCI_DEVICE_ID_INTEL_BIRCH_STREAM_SMBUS 0x5796
+ #define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4
+ #define PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_S_SMBUS 0x7a23
+ #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS 0x7aa3
+@@ -679,15 +681,11 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
+ return result ? priv->status : -ETIMEDOUT;
+ }
+
+- for (i = 1; i <= len; i++) {
+- if (i == len && read_write == I2C_SMBUS_READ)
+- smbcmd |= SMBHSTCNT_LAST_BYTE;
+- outb_p(smbcmd, SMBHSTCNT(priv));
+-
+- if (i == 1)
+- outb_p(inb(SMBHSTCNT(priv)) | SMBHSTCNT_START,
+- SMBHSTCNT(priv));
++ if (len == 1 && read_write == I2C_SMBUS_READ)
++ smbcmd |= SMBHSTCNT_LAST_BYTE;
++ outb_p(smbcmd | SMBHSTCNT_START, SMBHSTCNT(priv));
+
++ for (i = 1; i <= len; i++) {
+ status = i801_wait_byte_done(priv);
+ if (status)
+ return status;
+@@ -710,9 +708,12 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
+ data->block[0] = len;
+ }
+
+- /* Retrieve/store value in SMBBLKDAT */
+- if (read_write == I2C_SMBUS_READ)
++ if (read_write == I2C_SMBUS_READ) {
+ data->block[i] = inb_p(SMBBLKDAT(priv));
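++ /* The controller must know the upcoming byte is the last one before it is fetched. */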
++ if (i == len - 1)
++ outb_p(smbcmd | SMBHSTCNT_LAST_BYTE, SMBHSTCNT(priv));
++ }
++
+ if (read_write == I2C_SMBUS_WRITE && i+1 <= len)
+ outb_p(data->block[i+1], SMBBLKDAT(priv));
+
+@@ -1044,6 +1045,7 @@ static const struct pci_device_id i801_ids[] = {
+ { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_SOC_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_PCH_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
++ { PCI_DEVICE_DATA(INTEL, BIRCH_STREAM_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { 0, }
+ };
+
+diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
+index 937f7eebe9067..ef8ef3e270f8a 100644
+--- a/drivers/i2c/busses/i2c-pxa.c
++++ b/drivers/i2c/busses/i2c-pxa.c
+@@ -264,6 +264,9 @@ struct pxa_i2c {
+ u32 hs_mask;
+
+ struct i2c_bus_recovery_info recovery;
++ struct pinctrl *pinctrl;
++ struct pinctrl_state *pinctrl_default;
++ struct pinctrl_state *pinctrl_recovery;
+ };
+
+ #define _IBMR(i2c) ((i2c)->reg_ibmr)
+@@ -1300,12 +1303,13 @@ static void i2c_pxa_prepare_recovery(struct i2c_adapter *adap)
+ */
+ gpiod_set_value(i2c->recovery.scl_gpiod, ibmr & IBMR_SCLS);
+ gpiod_set_value(i2c->recovery.sda_gpiod, ibmr & IBMR_SDAS);
++
++ WARN_ON(pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_recovery));
+ }
+
+ static void i2c_pxa_unprepare_recovery(struct i2c_adapter *adap)
+ {
+ struct pxa_i2c *i2c = adap->algo_data;
+- struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
+ u32 isr;
+
+ /*
+@@ -1319,7 +1323,7 @@ static void i2c_pxa_unprepare_recovery(struct i2c_adapter *adap)
+ i2c_pxa_do_reset(i2c);
+ }
+
+- WARN_ON(pinctrl_select_state(bri->pinctrl, bri->pins_default));
++ WARN_ON(pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_default));
+
+ dev_dbg(&i2c->adap.dev, "recovery: IBMR 0x%08x ISR 0x%08x\n",
+ readl(_IBMR(i2c)), readl(_ISR(i2c)));
+@@ -1341,20 +1345,76 @@ static int i2c_pxa_init_recovery(struct pxa_i2c *i2c)
+ if (IS_ENABLED(CONFIG_I2C_PXA_SLAVE))
+ return 0;
+
+- bri->pinctrl = devm_pinctrl_get(dev);
+- if (PTR_ERR(bri->pinctrl) == -ENODEV) {
+- bri->pinctrl = NULL;
++ i2c->pinctrl = devm_pinctrl_get(dev);
++ if (PTR_ERR(i2c->pinctrl) == -ENODEV)
++ i2c->pinctrl = NULL;
++ if (IS_ERR(i2c->pinctrl))
++ return PTR_ERR(i2c->pinctrl);
++
++ if (!i2c->pinctrl)
++ return 0;
++
++ i2c->pinctrl_default = pinctrl_lookup_state(i2c->pinctrl,
++ PINCTRL_STATE_DEFAULT);
++ i2c->pinctrl_recovery = pinctrl_lookup_state(i2c->pinctrl, "recovery");
++
++ if (IS_ERR(i2c->pinctrl_default) || IS_ERR(i2c->pinctrl_recovery)) {
++ dev_info(dev, "missing pinmux recovery information: %ld %ld\n",
++ PTR_ERR(i2c->pinctrl_default),
++ PTR_ERR(i2c->pinctrl_recovery));
++ return 0;
++ }
++
++ /*
++ * Claiming GPIOs can influence the pinmux state, and may glitch the
++ * I2C bus. Do this carefully.
++ */
++ bri->scl_gpiod = devm_gpiod_get(dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN);
++ if (bri->scl_gpiod == ERR_PTR(-EPROBE_DEFER))
++ return -EPROBE_DEFER;
++ if (IS_ERR(bri->scl_gpiod)) {
++ dev_info(dev, "missing scl gpio recovery information: %pe\n",
++ bri->scl_gpiod);
++ return 0;
++ }
++
++ /*
++ * We have SCL. Pull SCL low and wait a bit so that SDA glitches
++ * have no effect.
++ */
++ gpiod_direction_output(bri->scl_gpiod, 0);
++ udelay(10);
++ bri->sda_gpiod = devm_gpiod_get(dev, "sda", GPIOD_OUT_HIGH_OPEN_DRAIN);
++
++ /* Wait a bit in case of a SDA glitch, and then release SCL. */
++ udelay(10);
++ gpiod_direction_output(bri->scl_gpiod, 1);
++
++ if (bri->sda_gpiod == ERR_PTR(-EPROBE_DEFER))
++ return -EPROBE_DEFER;
++
++ if (IS_ERR(bri->sda_gpiod)) {
++ dev_info(dev, "missing sda gpio recovery information: %pe\n",
++ bri->sda_gpiod);
+ return 0;
+ }
+- if (IS_ERR(bri->pinctrl))
+- return PTR_ERR(bri->pinctrl);
+
+ bri->prepare_recovery = i2c_pxa_prepare_recovery;
+ bri->unprepare_recovery = i2c_pxa_unprepare_recovery;
++ bri->recover_bus = i2c_generic_scl_recovery;
+
+ i2c->adap.bus_recovery_info = bri;
+
+- return 0;
++ /*
++ * Claiming GPIOs can change the pinmux state, which confuses the
++ * pinctrl since pinctrl's idea of the current setting is unaffected
++ * by the pinmux change caused by claiming the GPIO. Work around that
++ * by switching pinctrl to the GPIO state here. We do it this way to
++ * avoid glitching the I2C bus.
++ */
++ pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_recovery);
++
++ return pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_default);
+ }
+
+ static int i2c_pxa_probe(struct platform_device *dev)
+diff --git a/drivers/i2c/busses/i2c-sun6i-p2wi.c b/drivers/i2c/busses/i2c-sun6i-p2wi.c
+index fa6020dced595..85e035e7a1d75 100644
+--- a/drivers/i2c/busses/i2c-sun6i-p2wi.c
++++ b/drivers/i2c/busses/i2c-sun6i-p2wi.c
+@@ -201,6 +201,11 @@ static int p2wi_probe(struct platform_device *pdev)
+ return -EINVAL;
+ }
+
++ if (clk_freq == 0) {
++ dev_err(dev, "clock-frequency is set to 0 in DT\n");
++ return -EINVAL;
++ }
++
+ if (of_get_child_count(np) > 1) {
+ dev_err(dev, "P2WI only supports one slave device\n");
+ return -EINVAL;
+diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
+index 60746652fd525..7f30bcceebaed 100644
+--- a/drivers/i2c/i2c-core-base.c
++++ b/drivers/i2c/i2c-core-base.c
+@@ -931,8 +931,9 @@ int i2c_dev_irq_from_resources(const struct resource *resources,
+ struct i2c_client *
+ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
+ {
+- struct i2c_client *client;
+- int status;
++ struct i2c_client *client;
++ bool need_put = false;
++ int status;
+
+ client = kzalloc(sizeof *client, GFP_KERNEL);
+ if (!client)
+@@ -970,7 +971,6 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
+ client->dev.fwnode = info->fwnode;
+
+ device_enable_async_suspend(&client->dev);
+- i2c_dev_set_name(adap, client, info);
+
+ if (info->swnode) {
+ status = device_add_software_node(&client->dev, info->swnode);
+@@ -982,6 +982,7 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
+ }
+ }
+
++ i2c_dev_set_name(adap, client, info);
+ status = device_register(&client->dev);
+ if (status)
+ goto out_remove_swnode;
+@@ -993,6 +994,7 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
+
+ out_remove_swnode:
+ device_remove_software_node(&client->dev);
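++ /* Registration was attempted, so the device must now be released via put_device(). */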
++ need_put = true;
+ out_err_put_of_node:
+ of_node_put(info->of_node);
+ out_err:
+@@ -1000,7 +1002,10 @@ out_err:
+ "Failed to register i2c client %s at 0x%02x (%d)\n",
+ client->name, client->addr, status);
+ out_err_silent:
+- kfree(client);
++ if (need_put)
++ put_device(&client->dev);
++ else
++ kfree(client);
+ return ERR_PTR(status);
+ }
+ EXPORT_SYMBOL_GPL(i2c_new_client_device);
+diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h
+index 1247e6e6e9751..05b8b8dfa9bdd 100644
+--- a/drivers/i2c/i2c-core.h
++++ b/drivers/i2c/i2c-core.h
+@@ -29,7 +29,7 @@ int i2c_dev_irq_from_resources(const struct resource *resources,
+ */
+ static inline bool i2c_in_atomic_xfer_mode(void)
+ {
+- return system_state > SYSTEM_RUNNING && irqs_disabled();
++ return system_state > SYSTEM_RUNNING && !preemptible();
+ }
+
+ static inline int __i2c_lock_bus_helper(struct i2c_adapter *adap)
+diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
+index a01b59e3599b5..7d337380a05d9 100644
+--- a/drivers/i2c/i2c-dev.c
++++ b/drivers/i2c/i2c-dev.c
+@@ -450,8 +450,8 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
+ return -EINVAL;
+
+- rdwr_pa = memdup_user(rdwr_arg.msgs,
+- rdwr_arg.nmsgs * sizeof(struct i2c_msg));
++ rdwr_pa = memdup_array_user(rdwr_arg.msgs,
++ rdwr_arg.nmsgs, sizeof(struct i2c_msg));
+ if (IS_ERR(rdwr_pa))
+ return PTR_ERR(rdwr_pa);
+
+diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
+index 01610fa5b0ccf..cfa5d53e5be0e 100644
+--- a/drivers/i3c/master/i3c-master-cdns.c
++++ b/drivers/i3c/master/i3c-master-cdns.c
+@@ -192,7 +192,7 @@
+ #define SLV_STATUS1_HJ_DIS BIT(18)
+ #define SLV_STATUS1_MR_DIS BIT(17)
+ #define SLV_STATUS1_PROT_ERR BIT(16)
+-#define SLV_STATUS1_DA(x) (((s) & GENMASK(15, 9)) >> 9)
++#define SLV_STATUS1_DA(s) (((s) & GENMASK(15, 9)) >> 9)
+ #define SLV_STATUS1_HAS_DA BIT(8)
+ #define SLV_STATUS1_DDR_RX_FULL BIT(7)
+ #define SLV_STATUS1_DDR_TX_FULL BIT(6)
+@@ -1624,13 +1624,13 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
+ /* Device ID0 is reserved to describe this master. */
+ master->maxdevs = CONF_STATUS0_DEVS_NUM(val);
+ master->free_rr_slots = GENMASK(master->maxdevs, 1);
++ master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
++ master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);
+
+ val = readl(master->regs + CONF_STATUS1);
+ master->caps.cmdfifodepth = CONF_STATUS1_CMD_DEPTH(val);
+ master->caps.rxfifodepth = CONF_STATUS1_RX_DEPTH(val);
+ master->caps.txfifodepth = CONF_STATUS1_TX_DEPTH(val);
+- master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
+- master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);
+
+ spin_lock_init(&master->ibi.lock);
+ master->ibi.num_slots = CONF_STATUS1_IBI_HW_RES(val);
+diff --git a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
+index 97bb49ff5b53b..47b9b4d4ed3fc 100644
+--- a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
++++ b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
+@@ -64,15 +64,17 @@ static int hci_dat_v1_init(struct i3c_hci *hci)
+ return -EOPNOTSUPP;
+ }
+
+- /* use a bitmap for faster free slot search */
+- hci->DAT_data = bitmap_zalloc(hci->DAT_entries, GFP_KERNEL);
+- if (!hci->DAT_data)
+- return -ENOMEM;
+-
+- /* clear them */
+- for (dat_idx = 0; dat_idx < hci->DAT_entries; dat_idx++) {
+- dat_w0_write(dat_idx, 0);
+- dat_w1_write(dat_idx, 0);
++ if (!hci->DAT_data) {
++ /* use a bitmap for faster free slot search */
++ hci->DAT_data = bitmap_zalloc(hci->DAT_entries, GFP_KERNEL);
++ if (!hci->DAT_data)
++ return -ENOMEM;
++
++ /* clear them */
++ for (dat_idx = 0; dat_idx < hci->DAT_entries; dat_idx++) {
++ dat_w0_write(dat_idx, 0);
++ dat_w1_write(dat_idx, 0);
++ }
+ }
+
+ return 0;
+@@ -87,7 +89,13 @@ static void hci_dat_v1_cleanup(struct i3c_hci *hci)
+ static int hci_dat_v1_alloc_entry(struct i3c_hci *hci)
+ {
+ unsigned int dat_idx;
++ int ret;
+
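++ /* The DAT bitmap is allocated lazily; make sure it exists before searching it. */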
++ if (!hci->DAT_data) {
++ ret = hci_dat_v1_init(hci);
++ if (ret)
++ return ret;
++ }
+ dat_idx = find_first_zero_bit(hci->DAT_data, hci->DAT_entries);
+ if (dat_idx >= hci->DAT_entries)
+ return -ENOENT;
+@@ -103,7 +111,8 @@ static void hci_dat_v1_free_entry(struct i3c_hci *hci, unsigned int dat_idx)
+ {
+ dat_w0_write(dat_idx, 0);
+ dat_w1_write(dat_idx, 0);
+- __clear_bit(dat_idx, hci->DAT_data);
++ if (hci->DAT_data)
++ __clear_bit(dat_idx, hci->DAT_data);
+ }
+
+ static void hci_dat_v1_set_dynamic_addr(struct i3c_hci *hci,
+diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
+index 2990ac9eaade7..71b5dbe45c45c 100644
+--- a/drivers/i3c/master/mipi-i3c-hci/dma.c
++++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
+@@ -734,7 +734,7 @@ static bool hci_dma_irq_handler(struct i3c_hci *hci, unsigned int mask)
+ unsigned int i;
+ bool handled = false;
+
+- for (i = 0; mask && i < 8; i++) {
++ for (i = 0; mask && i < rings->total; i++) {
+ struct hci_rh_data *rh;
+ u32 status;
+
+diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
+index 6c43992c8cf6b..aa79334cb06c7 100644
+--- a/drivers/i3c/master/svc-i3c-master.c
++++ b/drivers/i3c/master/svc-i3c-master.c
+@@ -93,6 +93,7 @@
+ #define SVC_I3C_MINTMASKED 0x098
+ #define SVC_I3C_MERRWARN 0x09C
+ #define SVC_I3C_MERRWARN_NACK BIT(2)
++#define SVC_I3C_MERRWARN_TIMEOUT BIT(20)
+ #define SVC_I3C_MDMACTRL 0x0A0
+ #define SVC_I3C_MDATACTRL 0x0AC
+ #define SVC_I3C_MDATACTRL_FLUSHTB BIT(0)
+@@ -175,6 +176,7 @@ struct svc_i3c_regs_save {
+ * @ibi.slots: Available IBI slots
+ * @ibi.tbq_slot: To be queued IBI slot
+ * @ibi.lock: IBI lock
++ * @lock: Transfer lock; protects against races between the IBI work thread and master callbacks
+ */
+ struct svc_i3c_master {
+ struct i3c_master_controller base;
+@@ -203,6 +205,7 @@ struct svc_i3c_master {
+ /* Prevent races within IBI handlers */
+ spinlock_t lock;
+ } ibi;
++ struct mutex lock;
+ };
+
+ /**
+@@ -225,6 +228,14 @@ static bool svc_i3c_master_error(struct svc_i3c_master *master)
+ if (SVC_I3C_MSTATUS_ERRWARN(mstatus)) {
+ merrwarn = readl(master->regs + SVC_I3C_MERRWARN);
+ writel(merrwarn, master->regs + SVC_I3C_MERRWARN);
++
++ /* Ignore timeout error */
++ if (merrwarn & SVC_I3C_MERRWARN_TIMEOUT) {
++ dev_dbg(master->dev, "Warning condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
++ mstatus, merrwarn);
++ return false;
++ }
++
+ dev_err(master->dev,
+ "Error condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
+ mstatus, merrwarn);
+@@ -331,6 +342,7 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
+ struct i3c_ibi_slot *slot;
+ unsigned int count;
+ u32 mdatactrl;
++ int ret, val;
+ u8 *buf;
+
+ slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
+@@ -340,6 +352,13 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
+ slot->len = 0;
+ buf = slot->data;
+
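++ /* Wait for the IBI transfer to complete before draining the RX FIFO. */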
++ ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
++ SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
++ if (ret) {
++ dev_err(master->dev, "Timeout when polling for COMPLETE\n");
++ return ret;
++ }
++
+ while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS)) &&
+ slot->len < SVC_I3C_FIFO_SIZE) {
+ mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
+@@ -384,6 +403,7 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
+ u32 status, val;
+ int ret;
+
++ mutex_lock(&master->lock);
+ /* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
+ writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
+ SVC_I3C_MCTRL_IBIRESP_AUTO,
+@@ -394,6 +414,7 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
+ SVC_I3C_MSTATUS_IBIWON(val), 0, 1000);
+ if (ret) {
+ dev_err(master->dev, "Timeout when polling for IBIWON\n");
++ svc_i3c_master_emit_stop(master);
+ goto reenable_ibis;
+ }
+
+@@ -460,12 +481,13 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
+
+ reenable_ibis:
+ svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
++ mutex_unlock(&master->lock);
+ }
+
+ static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
+ {
+ struct svc_i3c_master *master = (struct svc_i3c_master *)dev_id;
+- u32 active = readl(master->regs + SVC_I3C_MINTMASKED);
++ u32 active = readl(master->regs + SVC_I3C_MSTATUS);
+
+ if (!SVC_I3C_MSTATUS_SLVSTART(active))
+ return IRQ_NONE;
+@@ -1007,6 +1029,9 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
+ u32 reg;
+ int ret;
+
++ /* Clear the write-1-to-clear SVC_I3C_MINT_IBIWON bit */
++ writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
++
+ writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
+ xfer_type |
+ SVC_I3C_MCTRL_IBIRESP_NACK |
+@@ -1025,6 +1050,23 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
+ goto emit_stop;
+ }
+
++ /*
++ * According to I3C spec ver 1.1.1, 5.1.2.2.3 Consequence of Controller Starting a Frame
++ * with I3C Target Address.
++ *
++ * The I3C Controller normally should start a Frame, the Address may be arbitrated, and so
++ * the Controller shall monitor to see whether an In-Band Interrupt request, a Controller
++ * Role Request (i.e., Secondary Controller requests to become the Active Controller), or
++ * a Hot-Join Request has been made.
++ *
++ * If the IBIWON check is missed, the wrong data will be returned. When IBIWON happens, return
++ * failure and yield to the event handlers above.
++ */
++ if (SVC_I3C_MSTATUS_IBIWON(reg)) {
++ ret = -ENXIO;
++ goto emit_stop;
++ }
++
+ if (rnw)
+ ret = svc_i3c_master_read(master, in, xfer_len);
+ else
+@@ -1204,9 +1246,11 @@ static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
+ cmd->read_len = 0;
+ cmd->continued = false;
+
++ mutex_lock(&master->lock);
+ svc_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ svc_i3c_master_dequeue_xfer(master, xfer);
++ mutex_unlock(&master->lock);
+
+ ret = xfer->ret;
+ kfree(buf);
+@@ -1250,9 +1294,11 @@ static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
+ cmd->read_len = read_len;
+ cmd->continued = false;
+
++ mutex_lock(&master->lock);
+ svc_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ svc_i3c_master_dequeue_xfer(master, xfer);
++ mutex_unlock(&master->lock);
+
+ if (cmd->read_len != xfer_len)
+ ccc->dests[0].payload.len = cmd->read_len;
+@@ -1309,9 +1355,11 @@ static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
+ cmd->continued = (i + 1) < nxfers;
+ }
+
++ mutex_lock(&master->lock);
+ svc_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ svc_i3c_master_dequeue_xfer(master, xfer);
++ mutex_unlock(&master->lock);
+
+ ret = xfer->ret;
+ svc_i3c_master_free_xfer(xfer);
+@@ -1347,9 +1395,11 @@ static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
+ cmd->continued = (i + 1 < nxfers);
+ }
+
++ mutex_lock(&master->lock);
+ svc_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ svc_i3c_master_dequeue_xfer(master, xfer);
++ mutex_unlock(&master->lock);
+
+ ret = xfer->ret;
+ svc_i3c_master_free_xfer(xfer);
+@@ -1540,6 +1590,8 @@ static int svc_i3c_master_probe(struct platform_device *pdev)
+
+ INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
+ INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
++ mutex_init(&master->lock);
++
+ ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
+ IRQF_NO_SUSPEND, "svc-i3c-irq", master);
+ if (ret)
+diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
+index 48f02dcc81c1b..70011fdbf5f63 100644
+--- a/drivers/iio/adc/stm32-adc-core.c
++++ b/drivers/iio/adc/stm32-adc-core.c
+@@ -706,6 +706,8 @@ static int stm32_adc_probe(struct platform_device *pdev)
+ struct stm32_adc_priv *priv;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
++ const struct of_device_id *of_id;
++
+ struct resource *res;
+ u32 max_rate;
+ int ret;
+@@ -718,8 +720,11 @@ static int stm32_adc_probe(struct platform_device *pdev)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, &priv->common);
+
+- priv->cfg = (const struct stm32_adc_priv_cfg *)
+- of_match_device(dev->driver->of_match_table, dev)->data;
++ of_id = of_match_device(dev->driver->of_match_table, dev);
++ if (!of_id)
++ return -ENODEV;
++
++ priv->cfg = (const struct stm32_adc_priv_cfg *)of_id->data;
+ priv->nb_adc_max = priv->cfg->num_adcs;
+ spin_lock_init(&priv->common.lock);
+
+diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
+index 08732e1ac9662..c132a9c073bff 100644
+--- a/drivers/infiniband/hw/hfi1/pcie.c
++++ b/drivers/infiniband/hw/hfi1/pcie.c
+@@ -3,6 +3,7 @@
+ * Copyright(c) 2015 - 2019 Intel Corporation.
+ */
+
++#include <linux/bitfield.h>
+ #include <linux/pci.h>
+ #include <linux/io.h>
+ #include <linux/delay.h>
+@@ -210,12 +211,6 @@ static u32 extract_speed(u16 linkstat)
+ return speed;
+ }
+
+-/* return the PCIe link speed from the given link status */
+-static u32 extract_width(u16 linkstat)
+-{
+- return (linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
+-}
+-
+ /* read the link status and set dd->{lbus_width,lbus_speed,lbus_info} */
+ static void update_lbus_info(struct hfi1_devdata *dd)
+ {
+@@ -228,7 +223,7 @@ static void update_lbus_info(struct hfi1_devdata *dd)
+ return;
+ }
+
+- dd->lbus_width = extract_width(linkstat);
++ dd->lbus_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, linkstat);
+ dd->lbus_speed = extract_speed(linkstat);
+ snprintf(dd->lbus_info, sizeof(dd->lbus_info),
+ "PCIe,%uMHz,x%u", dd->lbus_speed, dd->lbus_width);
+diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c
+index 9b3935042459e..8064959a95acd 100644
+--- a/drivers/iommu/iommufd/io_pagetable.c
++++ b/drivers/iommu/iommufd/io_pagetable.c
+@@ -1060,6 +1060,16 @@ static int iopt_area_split(struct iopt_area *area, unsigned long iova)
+ if (WARN_ON(rc))
+ goto err_remove_lhs;
+
++ /*
++ * If the original area has filled a domain, domains_itree has to be
++ * updated.
++ */
++ if (area->storage_domain) {
++ interval_tree_remove(&area->pages_node, &pages->domains_itree);
++ interval_tree_insert(&lhs->pages_node, &pages->domains_itree);
++ interval_tree_insert(&rhs->pages_node, &pages->domains_itree);
++ }
++
+ lhs->storage_domain = area->storage_domain;
+ lhs->pages = area->pages;
+ rhs->storage_domain = area->storage_domain;
+diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
+index 03c58e50cc44f..3d882cc605542 100644
+--- a/drivers/leds/trigger/ledtrig-netdev.c
++++ b/drivers/leds/trigger/ledtrig-netdev.c
+@@ -221,6 +221,9 @@ static ssize_t device_name_show(struct device *dev,
+ static int set_device_name(struct led_netdev_data *trigger_data,
+ const char *name, size_t size)
+ {
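++ /* Validate length here so every caller of set_device_name() is covered. */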
++ if (size >= IFNAMSIZ)
++ return -EINVAL;
++
+ cancel_delayed_work_sync(&trigger_data->work);
+
+ mutex_lock(&trigger_data->lock);
+@@ -263,9 +266,6 @@ static ssize_t device_name_store(struct device *dev,
+ struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev);
+ int ret;
+
+- if (size >= IFNAMSIZ)
+- return -EINVAL;
+-
+ ret = set_device_name(trigger_data, buf, size);
+
+ if (ret < 0)
+diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
+index 0cac5bead84fa..d4eec09009809 100644
+--- a/drivers/mcb/mcb-core.c
++++ b/drivers/mcb/mcb-core.c
+@@ -246,6 +246,7 @@ int mcb_device_register(struct mcb_bus *bus, struct mcb_device *dev)
+ return 0;
+
+ out:
++ put_device(&dev->dev);
+
+ return ret;
+ }
+diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
+index 656b6b71c7682..1ae37e693de04 100644
+--- a/drivers/mcb/mcb-parse.c
++++ b/drivers/mcb/mcb-parse.c
+@@ -106,7 +106,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus,
+ return 0;
+
+ err:
+- put_device(&mdev->dev);
++ mcb_free_dev(mdev);
+
+ return ret;
+ }
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index bc309e41d074a..486e1180cc3a3 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -254,7 +254,7 @@ enum evict_result {
+
+ typedef enum evict_result (*le_predicate)(struct lru_entry *le, void *context);
+
+-static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context)
++static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context, bool no_sleep)
+ {
+ unsigned long tested = 0;
+ struct list_head *h = lru->cursor;
+@@ -295,7 +295,8 @@ static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *con
+
+ h = h->next;
+
+- cond_resched();
++ if (!no_sleep)
++ cond_resched();
+ }
+
+ return NULL;
+@@ -382,7 +383,10 @@ struct dm_buffer {
+ */
+
+ struct buffer_tree {
+- struct rw_semaphore lock;
++ union {
++ struct rw_semaphore lock;
++ rwlock_t spinlock;
++ } u;
+ struct rb_root root;
+ } ____cacheline_aligned_in_smp;
+
+@@ -393,9 +397,12 @@ struct dm_buffer_cache {
+ * on the locks.
+ */
+ unsigned int num_locks;
++ bool no_sleep;
+ struct buffer_tree trees[];
+ };
+
++static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
++
+ static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
+ {
+ return dm_hash_locks_index(block, num_locks);
+@@ -403,22 +410,34 @@ static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
+
+ static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block)
+ {
+- down_read(&bc->trees[cache_index(block, bc->num_locks)].lock);
++ if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
++ read_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
++ else
++ down_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
+ }
+
+ static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block)
+ {
+- up_read(&bc->trees[cache_index(block, bc->num_locks)].lock);
++ if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
++ read_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
++ else
++ up_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
+ }
+
+ static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block)
+ {
+- down_write(&bc->trees[cache_index(block, bc->num_locks)].lock);
++ if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
++ write_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
++ else
++ down_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
+ }
+
+ static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block)
+ {
+- up_write(&bc->trees[cache_index(block, bc->num_locks)].lock);
++ if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
++ write_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
++ else
++ up_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
+ }
+
+ /*
+@@ -442,18 +461,32 @@ static void lh_init(struct lock_history *lh, struct dm_buffer_cache *cache, bool
+
+ static void __lh_lock(struct lock_history *lh, unsigned int index)
+ {
+- if (lh->write)
+- down_write(&lh->cache->trees[index].lock);
+- else
+- down_read(&lh->cache->trees[index].lock);
++ if (lh->write) {
++ if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
++ write_lock_bh(&lh->cache->trees[index].u.spinlock);
++ else
++ down_write(&lh->cache->trees[index].u.lock);
++ } else {
++ if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
++ read_lock_bh(&lh->cache->trees[index].u.spinlock);
++ else
++ down_read(&lh->cache->trees[index].u.lock);
++ }
+ }
+
+ static void __lh_unlock(struct lock_history *lh, unsigned int index)
+ {
+- if (lh->write)
+- up_write(&lh->cache->trees[index].lock);
+- else
+- up_read(&lh->cache->trees[index].lock);
++ if (lh->write) {
++ if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
++ write_unlock_bh(&lh->cache->trees[index].u.spinlock);
++ else
++ up_write(&lh->cache->trees[index].u.lock);
++ } else {
++ if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
++ read_unlock_bh(&lh->cache->trees[index].u.spinlock);
++ else
++ up_read(&lh->cache->trees[index].u.lock);
++ }
+ }
+
+ /*
+@@ -502,14 +535,18 @@ static struct dm_buffer *list_to_buffer(struct list_head *l)
+ return le_to_buffer(le);
+ }
+
+-static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks)
++static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks, bool no_sleep)
+ {
+ unsigned int i;
+
+ bc->num_locks = num_locks;
++ bc->no_sleep = no_sleep;
+
+ for (i = 0; i < bc->num_locks; i++) {
+- init_rwsem(&bc->trees[i].lock);
++ if (no_sleep)
++ rwlock_init(&bc->trees[i].u.spinlock);
++ else
++ init_rwsem(&bc->trees[i].u.lock);
+ bc->trees[i].root = RB_ROOT;
+ }
+
+@@ -648,7 +685,7 @@ static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode
+ struct lru_entry *le;
+ struct dm_buffer *b;
+
+- le = lru_evict(&bc->lru[list_mode], __evict_pred, &w);
++ le = lru_evict(&bc->lru[list_mode], __evict_pred, &w, bc->no_sleep);
+ if (!le)
+ return NULL;
+
+@@ -702,7 +739,7 @@ static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_
+ struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
+
+ while (true) {
+- le = lru_evict(&bc->lru[old_mode], __evict_pred, &w);
++ le = lru_evict(&bc->lru[old_mode], __evict_pred, &w, bc->no_sleep);
+ if (!le)
+ break;
+
+@@ -915,10 +952,11 @@ static void cache_remove_range(struct dm_buffer_cache *bc,
+ {
+ unsigned int i;
+
++ BUG_ON(bc->no_sleep);
+ for (i = 0; i < bc->num_locks; i++) {
+- down_write(&bc->trees[i].lock);
++ down_write(&bc->trees[i].u.lock);
+ __remove_range(bc, &bc->trees[i].root, begin, end, pred, release);
+- up_write(&bc->trees[i].lock);
++ up_write(&bc->trees[i].u.lock);
+ }
+ }
+
+@@ -979,8 +1017,6 @@ struct dm_bufio_client {
+ struct dm_buffer_cache cache; /* must be last member */
+ };
+
+-static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
+-
+ /*----------------------------------------------------------------*/
+
+ #define dm_bufio_in_request() (!!current->bio_list)
+@@ -1871,7 +1907,8 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
+ if (need_submit)
+ submit_io(b, REQ_OP_READ, read_endio);
+
+- wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
++ if (nf != NF_GET) /* we already tested this condition above */
++ wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
+
+ if (b->read_error) {
+ int error = blk_status_to_errno(b->read_error);
+@@ -2421,7 +2458,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
+ r = -ENOMEM;
+ goto bad_client;
+ }
+- cache_init(&c->cache, num_locks);
++ cache_init(&c->cache, num_locks, (flags & DM_BUFIO_CLIENT_NO_SLEEP) != 0);
+
+ c->bdev = bdev;
+ c->block_size = block_size;
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index dc0463bf3c2cf..0fadb656a2158 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -1700,11 +1700,17 @@ retry:
+ order = min(order, remaining_order);
+
+ while (order > 0) {
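++ /* Skip orders that would push the client past its page allowance. */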
++ if (unlikely(percpu_counter_read_positive(&cc->n_allocated_pages) +
++ (1 << order) > dm_crypt_pages_per_client))
++ goto decrease_order;
+ pages = alloc_pages(gfp_mask
+ | __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | __GFP_COMP,
+ order);
+- if (likely(pages != NULL))
++ if (likely(pages != NULL)) {
++ percpu_counter_add(&cc->n_allocated_pages, 1 << order);
+ goto have_pages;
++ }
++decrease_order:
+ order--;
+ }
+
+@@ -1742,10 +1748,13 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
+
+ if (clone->bi_vcnt > 0) { /* bio_for_each_folio_all crashes with an empty bio */
+ bio_for_each_folio_all(fi, clone) {
+- if (folio_test_large(fi.folio))
++ if (folio_test_large(fi.folio)) {
++ percpu_counter_sub(&cc->n_allocated_pages,
++ 1 << folio_order(fi.folio));
+ folio_put(fi.folio);
+- else
++ } else {
+ mempool_free(&fi.folio->page, &cc->page_pool);
++ }
+ }
+ }
+ }
+diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
+index 3ef9f018da60c..2099c755119e3 100644
+--- a/drivers/md/dm-verity-fec.c
++++ b/drivers/md/dm-verity-fec.c
+@@ -185,7 +185,7 @@ static int fec_is_erasure(struct dm_verity *v, struct dm_verity_io *io,
+ {
+ if (unlikely(verity_hash(v, verity_io_hash_req(v, io),
+ data, 1 << v->data_dev_block_bits,
+- verity_io_real_digest(v, io))))
++ verity_io_real_digest(v, io), true)))
+ return 0;
+
+ return memcmp(verity_io_real_digest(v, io), want_digest,
+@@ -386,7 +386,7 @@ static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io,
+ /* Always re-validate the corrected block against the expected hash */
+ r = verity_hash(v, verity_io_hash_req(v, io), fio->output,
+ 1 << v->data_dev_block_bits,
+- verity_io_real_digest(v, io));
++ verity_io_real_digest(v, io), true);
+ if (unlikely(r < 0))
+ return r;
+
+diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
+index 26adcfea03022..e115fcfe723c9 100644
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -135,20 +135,21 @@ static int verity_hash_update(struct dm_verity *v, struct ahash_request *req,
+ * Wrapper for crypto_ahash_init, which handles verity salting.
+ */
+ static int verity_hash_init(struct dm_verity *v, struct ahash_request *req,
+- struct crypto_wait *wait)
++ struct crypto_wait *wait, bool may_sleep)
+ {
+ int r;
+
+ ahash_request_set_tfm(req, v->tfm);
+- ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
+- CRYPTO_TFM_REQ_MAY_BACKLOG,
+- crypto_req_done, (void *)wait);
++ ahash_request_set_callback(req,
++ may_sleep ? CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG : 0,
++ crypto_req_done, (void *)wait);
+ crypto_init_wait(wait);
+
+ r = crypto_wait_req(crypto_ahash_init(req), wait);
+
+ if (unlikely(r < 0)) {
+- DMERR("crypto_ahash_init failed: %d", r);
++ if (r != -ENOMEM)
++ DMERR("crypto_ahash_init failed: %d", r);
+ return r;
+ }
+
+@@ -179,12 +180,12 @@ out:
+ }
+
+ int verity_hash(struct dm_verity *v, struct ahash_request *req,
+- const u8 *data, size_t len, u8 *digest)
++ const u8 *data, size_t len, u8 *digest, bool may_sleep)
+ {
+ int r;
+ struct crypto_wait wait;
+
+- r = verity_hash_init(v, req, &wait);
++ r = verity_hash_init(v, req, &wait, may_sleep);
+ if (unlikely(r < 0))
+ goto out;
+
+@@ -322,7 +323,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
+
+ r = verity_hash(v, verity_io_hash_req(v, io),
+ data, 1 << v->hash_dev_block_bits,
+- verity_io_real_digest(v, io));
++ verity_io_real_digest(v, io), !io->in_tasklet);
+ if (unlikely(r < 0))
+ goto release_ret_r;
+
+@@ -556,7 +557,7 @@ static int verity_verify_io(struct dm_verity_io *io)
+ continue;
+ }
+
+- r = verity_hash_init(v, req, &wait);
++ r = verity_hash_init(v, req, &wait, !io->in_tasklet);
+ if (unlikely(r < 0))
+ return r;
+
+@@ -652,7 +653,7 @@ static void verity_tasklet(unsigned long data)
+
+ io->in_tasklet = true;
+ err = verity_verify_io(io);
+- if (err == -EAGAIN) {
++ if (err == -EAGAIN || err == -ENOMEM) {
+ /* fallback to retrying with work-queue */
+ INIT_WORK(&io->work, verity_work);
+ queue_work(io->v->verify_wq, &io->work);
+@@ -1033,7 +1034,7 @@ static int verity_alloc_zero_digest(struct dm_verity *v)
+ goto out;
+
+ r = verity_hash(v, req, zero_data, 1 << v->data_dev_block_bits,
+- v->zero_digest);
++ v->zero_digest, true);
+
+ out:
+ kfree(req);
+diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
+index 2f555b4203679..f96f4e281ee4a 100644
+--- a/drivers/md/dm-verity.h
++++ b/drivers/md/dm-verity.h
+@@ -128,7 +128,7 @@ extern int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
+ u8 *data, size_t len));
+
+ extern int verity_hash(struct dm_verity *v, struct ahash_request *req,
+- const u8 *data, size_t len, u8 *digest);
++ const u8 *data, size_t len, u8 *digest, bool may_sleep);
+
+ extern int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
+ sector_t block, u8 *digest, bool *is_zero);
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 78d51dddf3a00..34b7196d9634c 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -449,7 +449,7 @@ void mddev_suspend(struct mddev *mddev)
+ set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
+ percpu_ref_kill(&mddev->active_io);
+
+- if (mddev->pers->prepare_suspend)
++ if (mddev->pers && mddev->pers->prepare_suspend)
+ mddev->pers->prepare_suspend(mddev);
+
+ wait_event(mddev->sb_wait, percpu_ref_is_zero(&mddev->active_io));
+diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
+index 49e0d9a095302..6f8fbd82e21c8 100644
+--- a/drivers/media/i2c/ccs/ccs-core.c
++++ b/drivers/media/i2c/ccs/ccs-core.c
+@@ -3097,7 +3097,7 @@ static int ccs_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+ try_fmt->code = sensor->internal_csi_format->code;
+ try_fmt->field = V4L2_FIELD_NONE;
+
+- if (ssd != sensor->pixel_array)
++ if (ssd == sensor->pixel_array)
+ continue;
+
+ try_comp = v4l2_subdev_get_try_compose(sd, fh->state, i);
+diff --git a/drivers/media/i2c/ccs/ccs-quirk.h b/drivers/media/i2c/ccs/ccs-quirk.h
+index 5838fcda92fd4..0b1a64958d714 100644
+--- a/drivers/media/i2c/ccs/ccs-quirk.h
++++ b/drivers/media/i2c/ccs/ccs-quirk.h
+@@ -32,12 +32,10 @@ struct ccs_sensor;
+ * @reg: Pointer to the register to access
+ * @value: Register value, set by the caller on write, or
+ * by the quirk on read
+- *
+- * @flags: Quirk flags
+- *
+ * @return: 0 on success, -ENOIOCTLCMD if no register
+ * access may be done by the caller (default read
+ * value is zero), else negative error code on error
++ * @flags: Quirk flags
+ */
+ struct ccs_quirk {
+ int (*limits)(struct ccs_sensor *sensor);
+diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
+index 74edcc76d12f4..6e1a0614e6d06 100644
+--- a/drivers/media/pci/cobalt/cobalt-driver.c
++++ b/drivers/media/pci/cobalt/cobalt-driver.c
+@@ -8,6 +8,7 @@
+ * All rights reserved.
+ */
+
++#include <linux/bitfield.h>
+ #include <linux/delay.h>
+ #include <media/i2c/adv7604.h>
+ #include <media/i2c/adv7842.h>
+@@ -210,17 +211,17 @@ void cobalt_pcie_status_show(struct cobalt *cobalt)
+ pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &stat);
+ cobalt_info("PCIe link capability 0x%08x: %s per lane and %u lanes\n",
+ capa, get_link_speed(capa),
+- (capa & PCI_EXP_LNKCAP_MLW) >> 4);
++ FIELD_GET(PCI_EXP_LNKCAP_MLW, capa));
+ cobalt_info("PCIe link control 0x%04x\n", ctrl);
+ cobalt_info("PCIe link status 0x%04x: %s per lane and %u lanes\n",
+ stat, get_link_speed(stat),
+- (stat & PCI_EXP_LNKSTA_NLW) >> 4);
++ FIELD_GET(PCI_EXP_LNKSTA_NLW, stat));
+
+ /* Bus */
+ pcie_capability_read_dword(pci_bus_dev, PCI_EXP_LNKCAP, &capa);
+ cobalt_info("PCIe bus link capability 0x%08x: %s per lane and %u lanes\n",
+ capa, get_link_speed(capa),
+- (capa & PCI_EXP_LNKCAP_MLW) >> 4);
++ FIELD_GET(PCI_EXP_LNKCAP_MLW, capa));
+
+ /* Slot */
+ pcie_capability_read_dword(pci_dev, PCI_EXP_SLTCAP, &capa);
+@@ -239,7 +240,7 @@ static unsigned pcie_link_get_lanes(struct cobalt *cobalt)
+ if (!pci_is_pcie(pci_dev))
+ return 0;
+ pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &link);
+- return (link & PCI_EXP_LNKSTA_NLW) >> 4;
++ return FIELD_GET(PCI_EXP_LNKSTA_NLW, link);
+ }
+
+ static unsigned pcie_bus_link_get_lanes(struct cobalt *cobalt)
+@@ -250,7 +251,7 @@ static unsigned pcie_bus_link_get_lanes(struct cobalt *cobalt)
+ if (!pci_is_pcie(pci_dev))
+ return 0;
+ pcie_capability_read_dword(pci_dev, PCI_EXP_LNKCAP, &link);
+- return (link & PCI_EXP_LNKCAP_MLW) >> 4;
++ return FIELD_GET(PCI_EXP_LNKCAP_MLW, link);
+ }
+
+ static void msi_config_show(struct cobalt *cobalt, struct pci_dev *pci_dev)
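The cobalt hunks replace open-coded mask-and-shift expressions with FIELD_GET(), which derives the shift from the mask itself and so cannot go stale if the mask changes. A small sketch of the equivalent arithmetic using a compiler builtin (assumes a nonzero, contiguous mask, as FIELD_GET() does):

    #include <stdint.h>
    #include <stdio.h>

    #define PCI_EXP_LNKSTA_NLW 0x03f0 /* negotiated link width, bits 9:4 */

    /* Shift right by the mask's lowest set bit, like the kernel's FIELD_GET(). */
    static uint32_t field_get(uint32_t mask, uint32_t reg)
    {
        return (reg & mask) >> __builtin_ctz(mask);
    }

    int main(void)
    {
        uint32_t lnksta = 0x0041; /* NLW field holds 4: a x4 link */

        printf("lanes: %u\n", (unsigned int)field_get(PCI_EXP_LNKSTA_NLW, lnksta));
        return 0;
    }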
+diff --git a/drivers/media/pci/intel/ipu-bridge.h b/drivers/media/pci/intel/ipu-bridge.h
+index 1ff0b2d04d929..1ed53d51e16a1 100644
+--- a/drivers/media/pci/intel/ipu-bridge.h
++++ b/drivers/media/pci/intel/ipu-bridge.h
+@@ -103,7 +103,7 @@ struct ipu_property_names {
+ struct ipu_node_names {
+ char port[7];
+ char endpoint[11];
+- char remote_port[7];
++ char remote_port[9];
+ char vcm[16];
+ };
+
+diff --git a/drivers/media/platform/qcom/camss/camss-csid-gen2.c b/drivers/media/platform/qcom/camss/camss-csid-gen2.c
+index 0f8ac29d038db..23acc387be5f0 100644
+--- a/drivers/media/platform/qcom/camss/camss-csid-gen2.c
++++ b/drivers/media/platform/qcom/camss/camss-csid-gen2.c
+@@ -355,9 +355,6 @@ static void __csid_configure_stream(struct csid_device *csid, u8 enable, u8 vc)
+ u8 dt_id = vc;
+
+ if (tg->enabled) {
+- /* Config Test Generator */
+- vc = 0xa;
+-
+ /* configure one DT, infinite frames */
+ val = vc << TPG_VC_CFG0_VC_NUM;
+ val |= INTELEAVING_MODE_ONE_SHOT << TPG_VC_CFG0_LINE_INTERLEAVING_MODE;
+@@ -370,14 +367,14 @@ static void __csid_configure_stream(struct csid_device *csid, u8 enable, u8 vc)
+
+ writel_relaxed(0x12345678, csid->base + CSID_TPG_LFSR_SEED);
+
+- val = input_format->height & 0x1fff << TPG_DT_n_CFG_0_FRAME_HEIGHT;
+- val |= input_format->width & 0x1fff << TPG_DT_n_CFG_0_FRAME_WIDTH;
++ val = (input_format->height & 0x1fff) << TPG_DT_n_CFG_0_FRAME_HEIGHT;
++ val |= (input_format->width & 0x1fff) << TPG_DT_n_CFG_0_FRAME_WIDTH;
+ writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_0(0));
+
+ val = format->data_type << TPG_DT_n_CFG_1_DATA_TYPE;
+ writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_1(0));
+
+- val = tg->mode << TPG_DT_n_CFG_2_PAYLOAD_MODE;
++ val = (tg->mode - 1) << TPG_DT_n_CFG_2_PAYLOAD_MODE;
+ val |= 0xBE << TPG_DT_n_CFG_2_USER_SPECIFIED_PAYLOAD;
+ val |= format->decode_format << TPG_DT_n_CFG_2_ENCODE_FORMAT;
+ writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_2(0));
+@@ -449,6 +446,8 @@ static void __csid_configure_stream(struct csid_device *csid, u8 enable, u8 vc)
+ writel_relaxed(val, csid->base + CSID_CSI2_RX_CFG0);
+
+ val = 1 << CSI2_RX_CFG1_PACKET_ECC_CORRECTION_EN;
++ if (vc > 3)
++ val |= 1 << CSI2_RX_CFG1_VC_MODE;
+ val |= 1 << CSI2_RX_CFG1_MISR_EN;
+ writel_relaxed(val, csid->base + CSID_CSI2_RX_CFG1);
+
+diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
+index 04baa80494c66..4dba61b8d3f2a 100644
+--- a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
++++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
+@@ -476,7 +476,7 @@ static void csiphy_lanes_enable(struct csiphy_device *csiphy,
+
+ settle_cnt = csiphy_settle_cnt_calc(link_freq, csiphy->timer_clk_rate);
+
+- val = is_gen2 ? BIT(7) : CSIPHY_3PH_CMN_CSI_COMMON_CTRL5_CLK_ENABLE;
++ val = CSIPHY_3PH_CMN_CSI_COMMON_CTRL5_CLK_ENABLE;
+ for (i = 0; i < c->num_data; i++)
+ val |= BIT(c->data[i].pos * 2);
+
+diff --git a/drivers/media/platform/qcom/camss/camss-vfe-170.c b/drivers/media/platform/qcom/camss/camss-vfe-170.c
+index 02494c89da91c..168baaa80d4e6 100644
+--- a/drivers/media/platform/qcom/camss/camss-vfe-170.c
++++ b/drivers/media/platform/qcom/camss/camss-vfe-170.c
+@@ -7,7 +7,6 @@
+ * Copyright (C) 2020-2021 Linaro Ltd.
+ */
+
+-#include <linux/delay.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+ #include <linux/iopoll.h>
+@@ -494,35 +493,20 @@ static int vfe_enable_output(struct vfe_line *line)
+ return 0;
+ }
+
+-static int vfe_disable_output(struct vfe_line *line)
++static void vfe_disable_output(struct vfe_line *line)
+ {
+ struct vfe_device *vfe = to_vfe(line);
+ struct vfe_output *output = &line->output;
+ unsigned long flags;
+ unsigned int i;
+- bool done;
+- int timeout = 0;
+-
+- do {
+- spin_lock_irqsave(&vfe->output_lock, flags);
+- done = !output->gen2.active_num;
+- spin_unlock_irqrestore(&vfe->output_lock, flags);
+- usleep_range(10000, 20000);
+-
+- if (timeout++ == 100) {
+- dev_err(vfe->camss->dev, "VFE idle timeout - resetting\n");
+- vfe_reset(vfe);
+- output->gen2.active_num = 0;
+- return 0;
+- }
+- } while (!done);
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+ for (i = 0; i < output->wm_num; i++)
+ vfe_wm_stop(vfe, output->wm_idx[i]);
++ output->gen2.active_num = 0;
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+- return 0;
++ vfe_reset(vfe);
+ }
+
+ /*
+diff --git a/drivers/media/platform/qcom/camss/camss-vfe-480.c b/drivers/media/platform/qcom/camss/camss-vfe-480.c
+index f70aad2e8c237..8ddb8016434ae 100644
+--- a/drivers/media/platform/qcom/camss/camss-vfe-480.c
++++ b/drivers/media/platform/qcom/camss/camss-vfe-480.c
+@@ -8,7 +8,6 @@
+ * Copyright (C) 2021 Jonathan Marek
+ */
+
+-#include <linux/delay.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+ #include <linux/iopoll.h>
+@@ -328,35 +327,20 @@ static int vfe_enable_output(struct vfe_line *line)
+ return 0;
+ }
+
+-static int vfe_disable_output(struct vfe_line *line)
++static void vfe_disable_output(struct vfe_line *line)
+ {
+ struct vfe_device *vfe = to_vfe(line);
+ struct vfe_output *output = &line->output;
+ unsigned long flags;
+ unsigned int i;
+- bool done;
+- int timeout = 0;
+-
+- do {
+- spin_lock_irqsave(&vfe->output_lock, flags);
+- done = !output->gen2.active_num;
+- spin_unlock_irqrestore(&vfe->output_lock, flags);
+- usleep_range(10000, 20000);
+-
+- if (timeout++ == 100) {
+- dev_err(vfe->camss->dev, "VFE idle timeout - resetting\n");
+- vfe_reset(vfe);
+- output->gen2.active_num = 0;
+- return 0;
+- }
+- } while (!done);
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+ for (i = 0; i < output->wm_num; i++)
+ vfe_wm_stop(vfe, output->wm_idx[i]);
++ output->gen2.active_num = 0;
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+- return 0;
++ vfe_reset(vfe);
+ }
+
+ /*
+diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
+index 06c95568e5af4..965500b83d073 100644
+--- a/drivers/media/platform/qcom/camss/camss-vfe.c
++++ b/drivers/media/platform/qcom/camss/camss-vfe.c
+@@ -535,7 +535,8 @@ static int vfe_check_clock_rates(struct vfe_device *vfe)
+ struct camss_clock *clock = &vfe->clock[i];
+
+ if (!strcmp(clock->name, "vfe0") ||
+- !strcmp(clock->name, "vfe1")) {
++ !strcmp(clock->name, "vfe1") ||
++ !strcmp(clock->name, "vfe_lite")) {
+ u64 min_rate = 0;
+ unsigned long rate;
+
+@@ -611,7 +612,7 @@ int vfe_get(struct vfe_device *vfe)
+ } else {
+ ret = vfe_check_clock_rates(vfe);
+ if (ret < 0)
+- goto error_pm_runtime_get;
++ goto error_pm_domain;
+ }
+ vfe->power_count++;
+
+diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
+index 1ef26aea3eae6..62e2e8bd3eb52 100644
+--- a/drivers/media/platform/qcom/camss/camss.c
++++ b/drivers/media/platform/qcom/camss/camss.c
+@@ -1627,6 +1627,12 @@ static int camss_probe(struct platform_device *pdev)
+ if (ret < 0)
+ goto err_cleanup;
+
++ ret = camss_configure_pd(camss);
++ if (ret < 0) {
++ dev_err(dev, "Failed to configure power domains: %d\n", ret);
++ goto err_cleanup;
++ }
++
+ ret = camss_init_subdevices(camss);
+ if (ret < 0)
+ goto err_cleanup;
+@@ -1679,12 +1685,6 @@ static int camss_probe(struct platform_device *pdev)
+ }
+ }
+
+- ret = camss_configure_pd(camss);
+- if (ret < 0) {
+- dev_err(dev, "Failed to configure power domains: %d\n", ret);
+- return ret;
+- }
+-
+ pm_runtime_enable(dev);
+
+ return 0;
+diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c
+index 3d5dadfa19009..3e85bd85066b7 100644
+--- a/drivers/media/platform/qcom/venus/hfi_msgs.c
++++ b/drivers/media/platform/qcom/venus/hfi_msgs.c
+@@ -398,7 +398,7 @@ session_get_prop_buf_req(struct hfi_msg_session_property_info_pkt *pkt,
+ memcpy(&bufreq[idx], buf_req, sizeof(*bufreq));
+ idx++;
+
+- if (idx > HFI_BUFFER_TYPE_MAX)
++ if (idx >= HFI_BUFFER_TYPE_MAX)
+ return HFI_ERR_SESSION_INVALID_PARAMETER;
+
+ req_bytes -= sizeof(struct hfi_buffer_requirements);
+diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c
+index 6cf74b2bc5ae3..c43839539d4dd 100644
+--- a/drivers/media/platform/qcom/venus/hfi_parser.c
++++ b/drivers/media/platform/qcom/venus/hfi_parser.c
+@@ -19,6 +19,9 @@ static void init_codecs(struct venus_core *core)
+ struct hfi_plat_caps *caps = core->caps, *cap;
+ unsigned long bit;
+
++ if (hweight_long(core->dec_codecs) + hweight_long(core->enc_codecs) > MAX_CODEC_NUM)
++ return;
++
+ for_each_set_bit(bit, &core->dec_codecs, MAX_CODEC_NUM) {
+ cap = &caps[core->codecs_count++];
+ cap->codec = BIT(bit);
+@@ -86,6 +89,9 @@ static void fill_profile_level(struct hfi_plat_caps *cap, const void *data,
+ {
+ const struct hfi_profile_level *pl = data;
+
++ if (cap->num_pl + num >= HFI_MAX_PROFILE_COUNT)
++ return;
++
+ memcpy(&cap->pl[cap->num_pl], pl, num * sizeof(*pl));
+ cap->num_pl += num;
+ }
+@@ -111,6 +117,9 @@ fill_caps(struct hfi_plat_caps *cap, const void *data, unsigned int num)
+ {
+ const struct hfi_capability *caps = data;
+
++ if (cap->num_caps + num >= MAX_CAP_ENTRIES)
++ return;
++
+ memcpy(&cap->caps[cap->num_caps], caps, num * sizeof(*caps));
+ cap->num_caps += num;
+ }
+@@ -137,6 +146,9 @@ static void fill_raw_fmts(struct hfi_plat_caps *cap, const void *fmts,
+ {
+ const struct raw_formats *formats = fmts;
+
++ if (cap->num_fmts + num_fmts >= MAX_FMT_ENTRIES)
++ return;
++
+ memcpy(&cap->fmts[cap->num_fmts], formats, num_fmts * sizeof(*formats));
+ cap->num_fmts += num_fmts;
+ }
+@@ -159,6 +171,9 @@ parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data)
+ rawfmts[i].buftype = fmt->buffer_type;
+ i++;
+
++ if (i >= MAX_FMT_ENTRIES)
++ return;
++
+ if (pinfo->num_planes > MAX_PLANES)
+ break;
+
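Each hfi_parser fill helper above now rejects an append that would run past its fixed-size destination, since the counts come from firmware-provided data. A self-contained sketch of the bounded-append pattern, mirroring the >= checks in the hunks (capacity and types are illustrative):

    #include <stdio.h>
    #include <string.h>

    #define MAX_ENTRIES 8

    struct caps {
        int entries[MAX_ENTRIES];
        unsigned int count;
    };

    /* Refuse the copy when it would reach or exceed the array bound. */
    static int fill_caps(struct caps *c, const int *src, unsigned int num)
    {
        if (c->count + num >= MAX_ENTRIES)
            return -1;
        memcpy(&c->entries[c->count], src, num * sizeof(*src));
        c->count += num;
        return 0;
    }

    int main(void)
    {
        struct caps c = { { 0 }, 0 };
        int batch[6] = { 1, 2, 3, 4, 5, 6 };

        printf("first:  %d\n", fill_caps(&c, batch, 6)); /* 0: fits */
        printf("second: %d\n", fill_caps(&c, batch, 6)); /* -1: rejected */
        return 0;
    }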
+diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
+index 5506a0d196ef9..90f09cc5c600d 100644
+--- a/drivers/media/platform/qcom/venus/hfi_venus.c
++++ b/drivers/media/platform/qcom/venus/hfi_venus.c
+@@ -205,6 +205,11 @@ static int venus_write_queue(struct venus_hfi_device *hdev,
+
+ new_wr_idx = wr_idx + dwords;
+ wr_ptr = (u32 *)(queue->qmem.kva + (wr_idx << 2));
++
++ if (wr_ptr < (u32 *)queue->qmem.kva ||
++ wr_ptr > (u32 *)(queue->qmem.kva + queue->qmem.size - sizeof(*wr_ptr)))
++ return -EINVAL;
++
+ if (new_wr_idx < qsize) {
+ memcpy(wr_ptr, packet, dwords << 2);
+ } else {
+@@ -272,6 +277,11 @@ static int venus_read_queue(struct venus_hfi_device *hdev,
+ }
+
+ rd_ptr = (u32 *)(queue->qmem.kva + (rd_idx << 2));
++
++ if (rd_ptr < (u32 *)queue->qmem.kva ||
++ rd_ptr > (u32 *)(queue->qmem.kva + queue->qmem.size - sizeof(*rd_ptr)))
++ return -EINVAL;
++
+ dwords = *rd_ptr >> 2;
+ if (!dwords)
+ return -EINVAL;
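venus_write_queue() and venus_read_queue() now validate the ring pointer computed from a shared-memory index before dereferencing it, because the firmware owns that memory and could present an out-of-range index. A sketch of the bounds check (like the kernel code, it assumes flat pointer arithmetic on the backing buffer):

    #include <stdint.h>
    #include <stdio.h>

    /* Validate an index-derived pointer against the backing buffer. */
    static uint32_t *ring_ptr(uint8_t *base, size_t size, uint32_t idx)
    {
        uint32_t *p = (uint32_t *)(base + ((size_t)idx << 2));

        if ((uint8_t *)p < base || (uint8_t *)p > base + size - sizeof(*p))
            return NULL; /* untrusted index pointed outside the queue */
        return p;
    }

    int main(void)
    {
        uint8_t qmem[64] = { 0 };

        printf("valid: %p\n", (void *)ring_ptr(qmem, sizeof(qmem), 3));
        printf("bogus: %p\n", (void *)ring_ptr(qmem, sizeof(qmem), 4096));
        return 0;
    }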
+diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
+index 74546f7e34691..5719dda6e0f0e 100644
+--- a/drivers/media/rc/imon.c
++++ b/drivers/media/rc/imon.c
+@@ -2427,6 +2427,12 @@ static int imon_probe(struct usb_interface *interface,
+ goto fail;
+ }
+
++ if (first_if->dev.driver != interface->dev.driver) {
++ dev_err(&interface->dev, "inconsistent driver matching\n");
++ ret = -EINVAL;
++ goto fail;
++ }
++
+ if (ifnum == 0) {
+ ictx = imon_init_intf0(interface, id);
+ if (!ictx) {
+diff --git a/drivers/media/rc/ir-sharp-decoder.c b/drivers/media/rc/ir-sharp-decoder.c
+index 3d8488c39c561..3311099cbd573 100644
+--- a/drivers/media/rc/ir-sharp-decoder.c
++++ b/drivers/media/rc/ir-sharp-decoder.c
+@@ -15,7 +15,9 @@
+ #define SHARP_UNIT 40 /* us */
+ #define SHARP_BIT_PULSE (8 * SHARP_UNIT) /* 320us */
+ #define SHARP_BIT_0_PERIOD (25 * SHARP_UNIT) /* 1ms (680us space) */
+-#define SHARP_BIT_1_PERIOD (50 * SHARP_UNIT) /* 2ms (1680ms space) */
++#define SHARP_BIT_1_PERIOD (50 * SHARP_UNIT) /* 2ms (1680us space) */
++#define SHARP_BIT_0_SPACE (17 * SHARP_UNIT) /* 680us space */
++#define SHARP_BIT_1_SPACE (42 * SHARP_UNIT) /* 1680us space */
+ #define SHARP_ECHO_SPACE (1000 * SHARP_UNIT) /* 40 ms */
+ #define SHARP_TRAILER_SPACE (125 * SHARP_UNIT) /* 5 ms (even longer) */
+
+@@ -168,8 +170,8 @@ static const struct ir_raw_timings_pd ir_sharp_timings = {
+ .header_pulse = 0,
+ .header_space = 0,
+ .bit_pulse = SHARP_BIT_PULSE,
+- .bit_space[0] = SHARP_BIT_0_PERIOD,
+- .bit_space[1] = SHARP_BIT_1_PERIOD,
++ .bit_space[0] = SHARP_BIT_0_SPACE,
++ .bit_space[1] = SHARP_BIT_1_SPACE,
+ .trailer_pulse = SHARP_BIT_PULSE,
+ .trailer_space = SHARP_ECHO_SPACE,
+ .msb_first = 1,
+diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
+index 043d23aaa3cbc..a537734832c50 100644
+--- a/drivers/media/rc/lirc_dev.c
++++ b/drivers/media/rc/lirc_dev.c
+@@ -276,7 +276,11 @@ static ssize_t lirc_transmit(struct file *file, const char __user *buf,
+ if (ret < 0)
+ goto out_kfree_raw;
+
+- count = ret;
++ /* drop trailing space */
++ if (!(ret % 2))
++ count = ret - 1;
++ else
++ count = ret;
+
+ txbuf = kmalloc_array(count, sizeof(unsigned int), GFP_KERNEL);
+ if (!txbuf) {
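An IR transmission alternates pulses and spaces and must end on a pulse; an even sample count means the final sample is a space, which the lirc hunk drops before sizing the TX buffer. The rule in isolation:

    #include <stdio.h>

    /* IR buffers alternate pulse/space and must end on a pulse (odd count). */
    static unsigned int drop_trailing_space(unsigned int count)
    {
        return (count % 2) ? count : count - 1;
    }

    int main(void)
    {
        printf("%u -> %u\n", 6u, drop_trailing_space(6)); /* 6 -> 5 */
        printf("%u -> %u\n", 5u, drop_trailing_space(5)); /* 5 -> 5 */
        return 0;
    }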
+diff --git a/drivers/media/test-drivers/vivid/vivid-rds-gen.c b/drivers/media/test-drivers/vivid/vivid-rds-gen.c
+index b5b104ee64c99..c57771119a34b 100644
+--- a/drivers/media/test-drivers/vivid/vivid-rds-gen.c
++++ b/drivers/media/test-drivers/vivid/vivid-rds-gen.c
+@@ -145,7 +145,7 @@ void vivid_rds_gen_fill(struct vivid_rds_gen *rds, unsigned freq,
+ rds->ta = alt;
+ rds->ms = true;
+ snprintf(rds->psname, sizeof(rds->psname), "%6d.%1d",
+- freq / 16, ((freq & 0xf) * 10) / 16);
++ (freq / 16) % 1000000, (((freq & 0xf) * 10) / 16) % 10);
+ if (alt)
+ strscpy(rds->radiotext,
+ " The Radio Data System can switch between different Radio Texts ",
+diff --git a/drivers/media/usb/gspca/cpia1.c b/drivers/media/usb/gspca/cpia1.c
+index 46ed95483e222..5f5fa851ca640 100644
+--- a/drivers/media/usb/gspca/cpia1.c
++++ b/drivers/media/usb/gspca/cpia1.c
+@@ -18,6 +18,7 @@
+
+ #include <linux/input.h>
+ #include <linux/sched/signal.h>
++#include <linux/bitops.h>
+
+ #include "gspca.h"
+
+@@ -1028,6 +1029,8 @@ static int set_flicker(struct gspca_dev *gspca_dev, int on, int apply)
+ sd->params.exposure.expMode = 2;
+ sd->exposure_status = EXPOSURE_NORMAL;
+ }
++ if (sd->params.exposure.gain >= BITS_PER_TYPE(currentexp))
++ return -EINVAL;
+ currentexp = currentexp << sd->params.exposure.gain;
+ sd->params.exposure.gain = 0;
+ /* round down current exposure to nearest value */
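Shifting a value by an amount greater than or equal to its bit width is undefined behavior in C, so the cpia1 hunk rejects gain values that would do that to currentexp. A standalone sketch of the guard (the helper name is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define BITS_PER_TYPE(x) (sizeof(x) * 8)

    /* Reject shift counts that would be undefined for the operand's width. */
    static int apply_gain(uint32_t *exp, unsigned int gain)
    {
        if (gain >= BITS_PER_TYPE(*exp))
            return -1;
        *exp <<= gain;
        return 0;
    }

    int main(void)
    {
        uint32_t exp = 100;

        printf("gain 3:  %d, exp=%u\n", apply_gain(&exp, 3), exp);
        printf("gain 32: %d\n", apply_gain(&exp, 32)); /* rejected: UB otherwise */
        return 0;
    }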
+diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
+index 699f44ffff0e4..ae5759200622c 100644
+--- a/drivers/mfd/intel-lpss-pci.c
++++ b/drivers/mfd/intel-lpss-pci.c
+@@ -561,6 +561,19 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0xa3e2), (kernel_ulong_t)&spt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xa3e3), (kernel_ulong_t)&spt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xa3e6), (kernel_ulong_t)&spt_uart_info },
++ /* LNL-M */
++ { PCI_VDEVICE(INTEL, 0xa825), (kernel_ulong_t)&bxt_uart_info },
++ { PCI_VDEVICE(INTEL, 0xa826), (kernel_ulong_t)&bxt_uart_info },
++ { PCI_VDEVICE(INTEL, 0xa827), (kernel_ulong_t)&tgl_info },
++ { PCI_VDEVICE(INTEL, 0xa830), (kernel_ulong_t)&tgl_info },
++ { PCI_VDEVICE(INTEL, 0xa846), (kernel_ulong_t)&tgl_info },
++ { PCI_VDEVICE(INTEL, 0xa850), (kernel_ulong_t)&ehl_i2c_info },
++ { PCI_VDEVICE(INTEL, 0xa851), (kernel_ulong_t)&ehl_i2c_info },
++ { PCI_VDEVICE(INTEL, 0xa852), (kernel_ulong_t)&bxt_uart_info },
++ { PCI_VDEVICE(INTEL, 0xa878), (kernel_ulong_t)&ehl_i2c_info },
++ { PCI_VDEVICE(INTEL, 0xa879), (kernel_ulong_t)&ehl_i2c_info },
++ { PCI_VDEVICE(INTEL, 0xa87a), (kernel_ulong_t)&ehl_i2c_info },
++ { PCI_VDEVICE(INTEL, 0xa87b), (kernel_ulong_t)&ehl_i2c_info },
+ { }
+ };
+ MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids);
+diff --git a/drivers/mfd/qcom-spmi-pmic.c b/drivers/mfd/qcom-spmi-pmic.c
+index 7e2cd79d17ebf..8e449cff5cec4 100644
+--- a/drivers/mfd/qcom-spmi-pmic.c
++++ b/drivers/mfd/qcom-spmi-pmic.c
+@@ -30,6 +30,8 @@ struct qcom_spmi_dev {
+ struct qcom_spmi_pmic pmic;
+ };
+
++static DEFINE_MUTEX(pmic_spmi_revid_lock);
++
+ #define N_USIDS(n) ((void *)n)
+
+ static const struct of_device_id pmic_spmi_id_table[] = {
+@@ -76,24 +78,21 @@ static const struct of_device_id pmic_spmi_id_table[] = {
+ *
+ * This only supports PMICs with 1 or 2 USIDs.
+ */
+-static struct spmi_device *qcom_pmic_get_base_usid(struct device *dev)
++static struct spmi_device *qcom_pmic_get_base_usid(struct spmi_device *sdev, struct qcom_spmi_dev *ctx)
+ {
+- struct spmi_device *sdev;
+- struct qcom_spmi_dev *ctx;
+ struct device_node *spmi_bus;
+- struct device_node *other_usid = NULL;
++ struct device_node *child;
+ int function_parent_usid, ret;
+ u32 pmic_addr;
+
+- sdev = to_spmi_device(dev);
+- ctx = dev_get_drvdata(&sdev->dev);
+-
+ /*
+ * Quick return if the function device is already in the base
+ * USID. This will always be hit for PMICs with only 1 USID.
+ */
+- if (sdev->usid % ctx->num_usids == 0)
++ if (sdev->usid % ctx->num_usids == 0) {
++ get_device(&sdev->dev);
+ return sdev;
++ }
+
+ function_parent_usid = sdev->usid;
+
+@@ -105,28 +104,61 @@ static struct spmi_device *qcom_pmic_get_base_usid(struct device *dev)
+ * device for USID 2.
+ */
+ spmi_bus = of_get_parent(sdev->dev.of_node);
+- do {
+- other_usid = of_get_next_child(spmi_bus, other_usid);
+-
+- ret = of_property_read_u32_index(other_usid, "reg", 0, &pmic_addr);
+- if (ret)
+- return ERR_PTR(ret);
++ sdev = ERR_PTR(-ENODATA);
++ for_each_child_of_node(spmi_bus, child) {
++ ret = of_property_read_u32_index(child, "reg", 0, &pmic_addr);
++ if (ret) {
++ of_node_put(child);
++ sdev = ERR_PTR(ret);
++ break;
++ }
+
+- sdev = spmi_device_from_of(other_usid);
+ if (pmic_addr == function_parent_usid - (ctx->num_usids - 1)) {
+- if (!sdev)
++ sdev = spmi_device_from_of(child);
++ if (!sdev) {
+ /*
+- * If the base USID for this PMIC hasn't probed yet
+- * but the secondary USID has, then we need to defer
+- * the function driver so that it will attempt to
+- * probe again when the base USID is ready.
++ * If the base USID for this PMIC hasn't been
++ * registered yet then we need to defer.
+ */
+- return ERR_PTR(-EPROBE_DEFER);
+- return sdev;
++ sdev = ERR_PTR(-EPROBE_DEFER);
++ }
++ of_node_put(child);
++ break;
+ }
+- } while (other_usid->sibling);
++ }
+
+- return ERR_PTR(-ENODATA);
++ of_node_put(spmi_bus);
++
++ return sdev;
++}
++
++static int pmic_spmi_get_base_revid(struct spmi_device *sdev, struct qcom_spmi_dev *ctx)
++{
++ struct qcom_spmi_dev *base_ctx;
++ struct spmi_device *base;
++ int ret = 0;
++
++ base = qcom_pmic_get_base_usid(sdev, ctx);
++ if (IS_ERR(base))
++ return PTR_ERR(base);
++
++ /*
++ * Copy revid info from base device if it has probed and is still
++ * bound to its driver.
++ */
++ mutex_lock(&pmic_spmi_revid_lock);
++ base_ctx = spmi_device_get_drvdata(base);
++ if (!base_ctx) {
++ ret = -EPROBE_DEFER;
++ goto out_unlock;
++ }
++ memcpy(&ctx->pmic, &base_ctx->pmic, sizeof(ctx->pmic));
++out_unlock:
++ mutex_unlock(&pmic_spmi_revid_lock);
++
++ put_device(&base->dev);
++
++ return ret;
+ }
+
+ static int pmic_spmi_load_revid(struct regmap *map, struct device *dev,
+@@ -204,11 +236,7 @@ const struct qcom_spmi_pmic *qcom_pmic_get(struct device *dev)
+ if (!of_match_device(pmic_spmi_id_table, dev->parent))
+ return ERR_PTR(-EINVAL);
+
+- sdev = qcom_pmic_get_base_usid(dev->parent);
+-
+- if (IS_ERR(sdev))
+- return ERR_CAST(sdev);
+-
++ sdev = to_spmi_device(dev->parent);
+ spmi = dev_get_drvdata(&sdev->dev);
+
+ return &spmi->pmic;
+@@ -243,16 +271,31 @@ static int pmic_spmi_probe(struct spmi_device *sdev)
+ ret = pmic_spmi_load_revid(regmap, &sdev->dev, &ctx->pmic);
+ if (ret < 0)
+ return ret;
++ } else {
++ ret = pmic_spmi_get_base_revid(sdev, ctx);
++ if (ret)
++ return ret;
+ }
++
++ mutex_lock(&pmic_spmi_revid_lock);
+ spmi_device_set_drvdata(sdev, ctx);
++ mutex_unlock(&pmic_spmi_revid_lock);
+
+ return devm_of_platform_populate(&sdev->dev);
+ }
+
++static void pmic_spmi_remove(struct spmi_device *sdev)
++{
++ mutex_lock(&pmic_spmi_revid_lock);
++ spmi_device_set_drvdata(sdev, NULL);
++ mutex_unlock(&pmic_spmi_revid_lock);
++}
++
+ MODULE_DEVICE_TABLE(of, pmic_spmi_id_table);
+
+ static struct spmi_driver pmic_spmi_driver = {
+ .probe = pmic_spmi_probe,
++ .remove = pmic_spmi_remove,
+ .driver = {
+ .name = "pmic-spmi",
+ .of_match_table = pmic_spmi_id_table,
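The qcom-spmi-pmic rework copies revid data from the base USID device under a mutex and clears drvdata in a new remove hook, so a secondary USID either sees fully initialized data or gets -EPROBE_DEFER, never a stale pointer. A simplified userspace sketch of that producer/consumer handshake (pthread mutex standing in for the kernel mutex; names are illustrative):

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    #define EPROBE_DEFER 517

    struct revid { int major, minor; };

    static pthread_mutex_t revid_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct revid *base_revid; /* NULL until the base device probes */

    /* Copy shared state under the lock; defer if the producer isn't bound. */
    static int get_base_revid(struct revid *out)
    {
        int ret = 0;

        pthread_mutex_lock(&revid_lock);
        if (!base_revid)
            ret = -EPROBE_DEFER;
        else
            memcpy(out, base_revid, sizeof(*out));
        pthread_mutex_unlock(&revid_lock);
        return ret;
    }

    int main(void)
    {
        struct revid local, r = { 2, 1 };

        printf("before probe: %d\n", get_base_revid(&local)); /* -517 */
        pthread_mutex_lock(&revid_lock);
        base_revid = &r; /* "probe" publishes the data */
        pthread_mutex_unlock(&revid_lock);
        printf("after probe:  %d\n", get_base_revid(&local)); /* 0 */
        return 0;
    }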
+diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
+index 7e1acc68d4359..af519088732d9 100644
+--- a/drivers/misc/pci_endpoint_test.c
++++ b/drivers/misc/pci_endpoint_test.c
+@@ -82,6 +82,7 @@
+ #define PCI_DEVICE_ID_RENESAS_R8A774B1 0x002b
+ #define PCI_DEVICE_ID_RENESAS_R8A774C0 0x002d
+ #define PCI_DEVICE_ID_RENESAS_R8A774E1 0x0025
++#define PCI_DEVICE_ID_RENESAS_R8A779F0 0x0031
+
+ static DEFINE_IDA(pci_endpoint_test_ida);
+
+@@ -991,6 +992,9 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
+ { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
+ { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
++ { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A779F0),
++ .driver_data = (kernel_ulong_t)&default_data,
++ },
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
+ .driver_data = (kernel_ulong_t)&j721e_data,
+ },
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index b66aa5de2ddec..e4e6556a9840c 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -2389,8 +2389,10 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
+ }
+ ret = mmc_blk_cqe_issue_flush(mq, req);
+ break;
+- case REQ_OP_READ:
+ case REQ_OP_WRITE:
++ card->written_flag = true;
++ fallthrough;
++ case REQ_OP_READ:
+ if (host->cqe_enabled)
+ ret = mmc_blk_cqe_issue_rw_rq(mq, req);
+ else
+diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
+index 4edf9057fa79d..b7754a1b8d978 100644
+--- a/drivers/mmc/core/card.h
++++ b/drivers/mmc/core/card.h
+@@ -280,4 +280,8 @@ static inline int mmc_card_broken_sd_cache(const struct mmc_card *c)
+ return c->quirks & MMC_QUIRK_BROKEN_SD_CACHE;
+ }
+
++static inline int mmc_card_broken_cache_flush(const struct mmc_card *c)
++{
++ return c->quirks & MMC_QUIRK_BROKEN_CACHE_FLUSH;
++}
+ #endif
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index 89cd48fcec79f..a46ce0868fe1f 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -2081,13 +2081,17 @@ static int _mmc_flush_cache(struct mmc_host *host)
+ {
+ int err = 0;
+
++ if (mmc_card_broken_cache_flush(host->card) && !host->card->written_flag)
++ return 0;
++
+ if (_mmc_cache_enabled(host)) {
+ err = mmc_switch(host->card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_FLUSH_CACHE, 1,
+ CACHE_FLUSH_TIMEOUT_MS);
+ if (err)
+- pr_err("%s: cache flush error %d\n",
+- mmc_hostname(host), err);
++ pr_err("%s: cache flush error %d\n", mmc_hostname(host), err);
++ else
++ host->card->written_flag = false;
+ }
+
+ return err;
+diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
+index 32b64b564fb1f..cca71867bc4ad 100644
+--- a/drivers/mmc/core/quirks.h
++++ b/drivers/mmc/core/quirks.h
+@@ -110,11 +110,12 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = {
+ MMC_QUIRK_TRIM_BROKEN),
+
+ /*
+- * Micron MTFC4GACAJCN-1M advertises TRIM but it does not seems to
+- * support being used to offload WRITE_ZEROES.
++ * Micron MTFC4GACAJCN-1M supports TRIM but does not appear to support
++ * WRITE_ZEROES offloading. It also supports caching, but the cache can
++ * only be flushed after a write has occurred.
+ */
+ MMC_FIXUP("Q2J54A", CID_MANFID_MICRON, 0x014e, add_quirk_mmc,
+- MMC_QUIRK_TRIM_BROKEN),
++ MMC_QUIRK_TRIM_BROKEN | MMC_QUIRK_BROKEN_CACHE_FLUSH),
+
+ /*
+	 * Kingston EMMC04G-M627 advertises TRIM but it does not seem to
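The MMC quirk pairs a per-card written_flag (set on REQ_OP_WRITE via the new fallthrough, cleared after a successful flush) with a check that skips the cache flush entirely when nothing has been written since the last one. The state machine in miniature:

    #include <stdbool.h>
    #include <stdio.h>

    struct card {
        bool broken_cache_flush; /* quirk: flush only valid after a write */
        bool written_flag;
    };

    static int flush_cache(struct card *c)
    {
        if (c->broken_cache_flush && !c->written_flag)
            return 0; /* nothing written since the last flush: skip */
        /* ...a real driver would issue EXT_CSD_FLUSH_CACHE here... */
        c->written_flag = false;
        return 0;
    }

    int main(void)
    {
        struct card c = { true, false };

        flush_cache(&c);       /* skipped */
        c.written_flag = true; /* a write went through */
        flush_cache(&c);       /* flushed, flag cleared */
        printf("written_flag=%d\n", c.written_flag);
        return 0;
    }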
+diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
+index ee9a25b900aec..6cdab4a0f0bb2 100644
+--- a/drivers/mmc/host/meson-gx-mmc.c
++++ b/drivers/mmc/host/meson-gx-mmc.c
+@@ -801,7 +801,6 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
+
+ cmd_cfg |= FIELD_PREP(CMD_CFG_CMD_INDEX_MASK, cmd->opcode);
+ cmd_cfg |= CMD_CFG_OWNER; /* owned by CPU */
+- cmd_cfg |= CMD_CFG_ERROR; /* stop in case of error */
+
+ meson_mmc_set_response_bits(cmd, &cmd_cfg);
+
+diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
+index 109d4b010f978..d8a991b349a82 100644
+--- a/drivers/mmc/host/sdhci-pci-gli.c
++++ b/drivers/mmc/host/sdhci-pci-gli.c
+@@ -25,6 +25,12 @@
+ #define GLI_9750_WT_EN_ON 0x1
+ #define GLI_9750_WT_EN_OFF 0x0
+
++#define PCI_GLI_9750_PM_CTRL 0xFC
++#define PCI_GLI_9750_PM_STATE GENMASK(1, 0)
++
++#define PCI_GLI_9750_CORRERR_MASK 0x214
++#define PCI_GLI_9750_CORRERR_MASK_REPLAY_TIMER_TIMEOUT BIT(12)
++
+ #define SDHCI_GLI_9750_CFG2 0x848
+ #define SDHCI_GLI_9750_CFG2_L1DLY GENMASK(28, 24)
+ #define GLI_9750_CFG2_L1DLY_VALUE 0x1F
+@@ -149,6 +155,9 @@
+ #define PCI_GLI_9755_PM_CTRL 0xFC
+ #define PCI_GLI_9755_PM_STATE GENMASK(1, 0)
+
++#define PCI_GLI_9755_CORRERR_MASK 0x214
++#define PCI_GLI_9755_CORRERR_MASK_REPLAY_TIMER_TIMEOUT BIT(12)
++
+ #define SDHCI_GLI_9767_GM_BURST_SIZE 0x510
+ #define SDHCI_GLI_9767_GM_BURST_SIZE_AXI_ALWAYS_SET BIT(8)
+
+@@ -536,8 +545,12 @@ static void sdhci_gl9750_set_clock(struct sdhci_host *host, unsigned int clock)
+
+ static void gl9750_hw_setting(struct sdhci_host *host)
+ {
++ struct sdhci_pci_slot *slot = sdhci_priv(host);
++ struct pci_dev *pdev;
+ u32 value;
+
++ pdev = slot->chip->pdev;
++
+ gl9750_wt_on(host);
+
+ value = sdhci_readl(host, SDHCI_GLI_9750_CFG2);
+@@ -547,6 +560,18 @@ static void gl9750_hw_setting(struct sdhci_host *host)
+ GLI_9750_CFG2_L1DLY_VALUE);
+ sdhci_writel(host, value, SDHCI_GLI_9750_CFG2);
+
++ /* toggle PM state to allow GL9750 to enter ASPM L1.2 */
++ pci_read_config_dword(pdev, PCI_GLI_9750_PM_CTRL, &value);
++ value |= PCI_GLI_9750_PM_STATE;
++ pci_write_config_dword(pdev, PCI_GLI_9750_PM_CTRL, value);
++ value &= ~PCI_GLI_9750_PM_STATE;
++ pci_write_config_dword(pdev, PCI_GLI_9750_PM_CTRL, value);
++
++ /* mask the replay timer timeout of AER */
++ pci_read_config_dword(pdev, PCI_GLI_9750_CORRERR_MASK, &value);
++ value |= PCI_GLI_9750_CORRERR_MASK_REPLAY_TIMER_TIMEOUT;
++ pci_write_config_dword(pdev, PCI_GLI_9750_CORRERR_MASK, value);
++
+ gl9750_wt_off(host);
+ }
+
+@@ -756,6 +781,11 @@ static void gl9755_hw_setting(struct sdhci_pci_slot *slot)
+ value &= ~PCI_GLI_9755_PM_STATE;
+ pci_write_config_dword(pdev, PCI_GLI_9755_PM_CTRL, value);
+
++ /* mask the replay timer timeout of AER */
++ pci_read_config_dword(pdev, PCI_GLI_9755_CORRERR_MASK, &value);
++ value |= PCI_GLI_9755_CORRERR_MASK_REPLAY_TIMER_TIMEOUT;
++ pci_write_config_dword(pdev, PCI_GLI_9755_CORRERR_MASK, value);
++
+ gl9755_wt_off(pdev);
+ }
+
+diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
+index 7cdf0f54e3a50..5d1faa8fbfbf1 100644
+--- a/drivers/mmc/host/sdhci_am654.c
++++ b/drivers/mmc/host/sdhci_am654.c
+@@ -598,7 +598,7 @@ static int sdhci_am654_get_otap_delay(struct sdhci_host *host,
+ return 0;
+ }
+
+- for (i = MMC_TIMING_MMC_HS; i <= MMC_TIMING_MMC_HS400; i++) {
++ for (i = MMC_TIMING_LEGACY; i <= MMC_TIMING_MMC_HS400; i++) {
+
+ ret = device_property_read_u32(dev, td[i].otap_binding,
+ &sdhci_am654->otap_del_sel[i]);
+diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
+index 9ec593d52f0fa..cef0e716ad16f 100644
+--- a/drivers/mmc/host/vub300.c
++++ b/drivers/mmc/host/vub300.c
+@@ -2309,6 +2309,7 @@ static int vub300_probe(struct usb_interface *interface,
+ vub300->read_only =
+ (0x0010 & vub300->system_port_status.port_flags) ? 1 : 0;
+ } else {
++ retval = -EINVAL;
+ goto error5;
+ }
+ usb_set_intfdata(interface, vub300);
+diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
+index 11b06fefaa0e2..c10693ba265ba 100644
+--- a/drivers/mtd/chips/cfi_cmdset_0001.c
++++ b/drivers/mtd/chips/cfi_cmdset_0001.c
+@@ -422,9 +422,25 @@ read_pri_intelext(struct map_info *map, __u16 adr)
+ extra_size = 0;
+
+ /* Protection Register info */
+- if (extp->NumProtectionFields)
++ if (extp->NumProtectionFields) {
++ struct cfi_intelext_otpinfo *otp =
++ (struct cfi_intelext_otpinfo *)&extp->extra[0];
++
+ extra_size += (extp->NumProtectionFields - 1) *
+- sizeof(struct cfi_intelext_otpinfo);
++ sizeof(struct cfi_intelext_otpinfo);
++
++ if (extp_size >= sizeof(*extp) + extra_size) {
++ int i;
++
++ /* Do some byteswapping if necessary */
++ for (i = 0; i < extp->NumProtectionFields - 1; i++) {
++ otp->ProtRegAddr = le32_to_cpu(otp->ProtRegAddr);
++ otp->FactGroups = le16_to_cpu(otp->FactGroups);
++ otp->UserGroups = le16_to_cpu(otp->UserGroups);
++ otp++;
++ }
++ }
++ }
+ }
+
+ if (extp->MinorVersion >= '1') {
+diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c
+index a9909eb081244..8231e9828dce7 100644
+--- a/drivers/mtd/nand/raw/intel-nand-controller.c
++++ b/drivers/mtd/nand/raw/intel-nand-controller.c
+@@ -619,6 +619,11 @@ static int ebu_nand_probe(struct platform_device *pdev)
+ ebu_host->cs_num = cs;
+
+ resname = devm_kasprintf(dev, GFP_KERNEL, "nand_cs%d", cs);
++ if (!resname) {
++ ret = -ENOMEM;
++ goto err_of_node_put;
++ }
++
+ ebu_host->cs[cs].chipaddr = devm_platform_ioremap_resource_byname(pdev,
+ resname);
+ if (IS_ERR(ebu_host->cs[cs].chipaddr)) {
+@@ -655,6 +660,11 @@ static int ebu_nand_probe(struct platform_device *pdev)
+ }
+
+ resname = devm_kasprintf(dev, GFP_KERNEL, "addr_sel%d", cs);
++ if (!resname) {
++ ret = -ENOMEM;
++ goto err_cleanup_dma;
++ }
++
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, resname);
+ if (!res) {
+ ret = -EINVAL;
+diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
+index b10011dec1e62..b16adc0e92e9d 100644
+--- a/drivers/mtd/nand/raw/meson_nand.c
++++ b/drivers/mtd/nand/raw/meson_nand.c
+@@ -1115,6 +1115,9 @@ static int meson_nfc_clk_init(struct meson_nfc *nfc)
+ init.name = devm_kasprintf(nfc->dev,
+ GFP_KERNEL, "%s#div",
+ dev_name(nfc->dev));
++ if (!init.name)
++ return -ENOMEM;
++
+ init.ops = &clk_divider_ops;
+ nfc_divider_parent_data[0].fw_name = "device";
+ init.parent_data = nfc_divider_parent_data;
+diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c
+index eb0b9d16e8dae..a553e3ac8ff41 100644
+--- a/drivers/mtd/nand/raw/tegra_nand.c
++++ b/drivers/mtd/nand/raw/tegra_nand.c
+@@ -1197,6 +1197,10 @@ static int tegra_nand_probe(struct platform_device *pdev)
+ init_completion(&ctrl->dma_complete);
+
+ ctrl->irq = platform_get_irq(pdev, 0);
++ if (ctrl->irq < 0) {
++ err = ctrl->irq;
++ goto err_put_pm;
++ }
+ err = devm_request_irq(&pdev->dev, ctrl->irq, tegra_nand_irq, 0,
+ dev_name(&pdev->dev), ctrl);
+ if (err) {
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index a64ebb7f5b712..363b6cb33ae08 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1499,6 +1499,10 @@ done:
+ static void bond_setup_by_slave(struct net_device *bond_dev,
+ struct net_device *slave_dev)
+ {
++ bool was_up = !!(bond_dev->flags & IFF_UP);
++
++ dev_close(bond_dev);
++
+ bond_dev->header_ops = slave_dev->header_ops;
+
+ bond_dev->type = slave_dev->type;
+@@ -1513,6 +1517,8 @@ static void bond_setup_by_slave(struct net_device *bond_dev,
+ bond_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+ bond_dev->flags |= (IFF_POINTOPOINT | IFF_NOARP);
+ }
++ if (was_up)
++ dev_open(bond_dev, NULL);
+ }
+
+ /* On bonding slaves other than the currently active slave, suppress
+diff --git a/drivers/net/dsa/lan9303_mdio.c b/drivers/net/dsa/lan9303_mdio.c
+index d8ab2b77d201e..167a86f39f277 100644
+--- a/drivers/net/dsa/lan9303_mdio.c
++++ b/drivers/net/dsa/lan9303_mdio.c
+@@ -32,7 +32,7 @@ static int lan9303_mdio_write(void *ctx, uint32_t reg, uint32_t val)
+ struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx;
+
+ reg <<= 2; /* reg num to offset */
+- mutex_lock(&sw_dev->device->bus->mdio_lock);
++ mutex_lock_nested(&sw_dev->device->bus->mdio_lock, MDIO_MUTEX_NESTED);
+ lan9303_mdio_real_write(sw_dev->device, reg, val & 0xffff);
+ lan9303_mdio_real_write(sw_dev->device, reg + 2, (val >> 16) & 0xffff);
+ mutex_unlock(&sw_dev->device->bus->mdio_lock);
+@@ -50,7 +50,7 @@ static int lan9303_mdio_read(void *ctx, uint32_t reg, uint32_t *val)
+ struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx;
+
+ reg <<= 2; /* reg num to offset */
+- mutex_lock(&sw_dev->device->bus->mdio_lock);
++ mutex_lock_nested(&sw_dev->device->bus->mdio_lock, MDIO_MUTEX_NESTED);
+ *val = lan9303_mdio_real_read(sw_dev->device, reg);
+ *val |= (lan9303_mdio_real_read(sw_dev->device, reg + 2) << 16);
+ mutex_unlock(&sw_dev->device->bus->mdio_lock);
+diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c
+index 045fe133f6ee9..5beadabc21361 100644
+--- a/drivers/net/ethernet/amd/pds_core/adminq.c
++++ b/drivers/net/ethernet/amd/pds_core/adminq.c
+@@ -146,7 +146,7 @@ irqreturn_t pdsc_adminq_isr(int irq, void *data)
+ }
+
+ queue_work(pdsc->wq, &qcq->work);
+- pds_core_intr_mask(&pdsc->intr_ctrl[irq], PDS_CORE_INTR_MASK_CLEAR);
++ pds_core_intr_mask(&pdsc->intr_ctrl[qcq->intx], PDS_CORE_INTR_MASK_CLEAR);
+
+ return IRQ_HANDLED;
+ }
+diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
+index e545fafc48196..b1c1f1007b065 100644
+--- a/drivers/net/ethernet/amd/pds_core/core.h
++++ b/drivers/net/ethernet/amd/pds_core/core.h
+@@ -15,7 +15,7 @@
+ #define PDSC_DRV_DESCRIPTION "AMD/Pensando Core Driver"
+
+ #define PDSC_WATCHDOG_SECS 5
+-#define PDSC_QUEUE_NAME_MAX_SZ 32
++#define PDSC_QUEUE_NAME_MAX_SZ 16
+ #define PDSC_ADMINQ_MIN_LENGTH 16 /* must be a power of two */
+ #define PDSC_NOTIFYQ_LENGTH 64 /* must be a power of two */
+ #define PDSC_TEARDOWN_RECOVERY false
+diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c
+index f77cd9f5a2fda..eb178728edba9 100644
+--- a/drivers/net/ethernet/amd/pds_core/dev.c
++++ b/drivers/net/ethernet/amd/pds_core/dev.c
+@@ -254,10 +254,14 @@ static int pdsc_identify(struct pdsc *pdsc)
+ struct pds_core_drv_identity drv = {};
+ size_t sz;
+ int err;
++ int n;
+
+ drv.drv_type = cpu_to_le32(PDS_DRIVER_LINUX);
+- snprintf(drv.driver_ver_str, sizeof(drv.driver_ver_str),
+- "%s %s", PDS_CORE_DRV_NAME, utsname()->release);
++ /* Catching the return quiets a Wformat-truncation complaint */
++ n = snprintf(drv.driver_ver_str, sizeof(drv.driver_ver_str),
++ "%s %s", PDS_CORE_DRV_NAME, utsname()->release);
++ if (n > sizeof(drv.driver_ver_str))
++ dev_dbg(pdsc->dev, "release name truncated, don't care\n");
+
+ /* Next let's get some info about the device
+ * We use the devcmd_lock at this level in order to
+diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
+index d9607033bbf21..d2abf32b93fe3 100644
+--- a/drivers/net/ethernet/amd/pds_core/devlink.c
++++ b/drivers/net/ethernet/amd/pds_core/devlink.c
+@@ -104,7 +104,7 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ struct pds_core_fw_list_info fw_list;
+ struct pdsc *pdsc = devlink_priv(dl);
+ union pds_core_dev_comp comp;
+- char buf[16];
++ char buf[32];
+ int listlen;
+ int err;
+ int i;
+diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
+index 43d821fe7a542..63ba64dbb7310 100644
+--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
++++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
+@@ -504,15 +504,12 @@ struct atl1c_rrd_ring {
+ u16 next_to_use;
+ u16 next_to_clean;
+ struct napi_struct napi;
+- struct page *rx_page;
+- unsigned int rx_page_offset;
+ };
+
+ /* board specific private data structure */
+ struct atl1c_adapter {
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+- unsigned int rx_frag_size;
+ struct atl1c_hw hw;
+ struct atl1c_hw_stats hw_stats;
+ struct mii_if_info mii; /* MII interface info */
+diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+index 940c5d1ff9cfc..74b78164cf74a 100644
+--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
++++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+@@ -483,15 +483,10 @@ static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
+ static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
+ struct net_device *dev)
+ {
+- unsigned int head_size;
+ int mtu = dev->mtu;
+
+ adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ?
+ roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
+-
+- head_size = SKB_DATA_ALIGN(adapter->rx_buffer_len + NET_SKB_PAD + NET_IP_ALIGN) +
+- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+- adapter->rx_frag_size = roundup_pow_of_two(head_size);
+ }
+
+ static netdev_features_t atl1c_fix_features(struct net_device *netdev,
+@@ -964,7 +959,6 @@ static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
+ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
+ {
+ struct pci_dev *pdev = adapter->pdev;
+- int i;
+
+ dma_free_coherent(&pdev->dev, adapter->ring_header.size,
+ adapter->ring_header.desc, adapter->ring_header.dma);
+@@ -977,12 +971,6 @@ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
+ kfree(adapter->tpd_ring[0].buffer_info);
+ adapter->tpd_ring[0].buffer_info = NULL;
+ }
+- for (i = 0; i < adapter->rx_queue_count; ++i) {
+- if (adapter->rrd_ring[i].rx_page) {
+- put_page(adapter->rrd_ring[i].rx_page);
+- adapter->rrd_ring[i].rx_page = NULL;
+- }
+- }
+ }
+
+ /**
+@@ -1754,48 +1742,11 @@ static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
+ skb_checksum_none_assert(skb);
+ }
+
+-static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter,
+- u32 queue, bool napi_mode)
+-{
+- struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue];
+- struct sk_buff *skb;
+- struct page *page;
+-
+- if (adapter->rx_frag_size > PAGE_SIZE) {
+- if (likely(napi_mode))
+- return napi_alloc_skb(&rrd_ring->napi,
+- adapter->rx_buffer_len);
+- else
+- return netdev_alloc_skb_ip_align(adapter->netdev,
+- adapter->rx_buffer_len);
+- }
+-
+- page = rrd_ring->rx_page;
+- if (!page) {
+- page = alloc_page(GFP_ATOMIC);
+- if (unlikely(!page))
+- return NULL;
+- rrd_ring->rx_page = page;
+- rrd_ring->rx_page_offset = 0;
+- }
+-
+- skb = build_skb(page_address(page) + rrd_ring->rx_page_offset,
+- adapter->rx_frag_size);
+- if (likely(skb)) {
+- skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+- rrd_ring->rx_page_offset += adapter->rx_frag_size;
+- if (rrd_ring->rx_page_offset >= PAGE_SIZE)
+- rrd_ring->rx_page = NULL;
+- else
+- get_page(page);
+- }
+- return skb;
+-}
+-
+ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue,
+ bool napi_mode)
+ {
+ struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[queue];
++ struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue];
+ struct pci_dev *pdev = adapter->pdev;
+ struct atl1c_buffer *buffer_info, *next_info;
+ struct sk_buff *skb;
+@@ -1814,13 +1765,27 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue,
+ while (next_info->flags & ATL1C_BUFFER_FREE) {
+ rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use);
+
+- skb = atl1c_alloc_skb(adapter, queue, napi_mode);
++		/* When the DMA RX address is set to something like
++		 * 0x....fc0, it is very likely to cause a DMA
++		 * RFD overflow issue.
++		 *
++		 * To work around it, we allocate the rx skb with 64
++		 * bytes of extra space, and offset the address whenever
++		 * 0x....fc0 is detected.
++		 */
++ if (likely(napi_mode))
++ skb = napi_alloc_skb(&rrd_ring->napi, adapter->rx_buffer_len + 64);
++ else
++ skb = netdev_alloc_skb(adapter->netdev, adapter->rx_buffer_len + 64);
+ if (unlikely(!skb)) {
+ if (netif_msg_rx_err(adapter))
+ dev_warn(&pdev->dev, "alloc rx buffer failed\n");
+ break;
+ }
+
++ if (((unsigned long)skb->data & 0xfff) == 0xfc0)
++ skb_reserve(skb, 64);
++
+ /*
+ * Make buffer alignment 2 beyond a 16 byte boundary
+ * this will result in a 16 byte aligned IP header after
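The atl1c workaround over-allocates each receive skb by 64 bytes and bumps the data pointer whenever its low 12 bits land on 0xfc0, the address pattern observed to trigger the RFD overflow. The address test on its own (buffer handling is elided here):

    #include <stdint.h>
    #include <stdio.h>

    /* Offset addresses ending in 0x...fc0; pairs with a 64-byte
     * over-allocation so the shifted data still fits the buffer.
     */
    static uintptr_t fixup_rx_addr(uintptr_t data)
    {
        return ((data & 0xfff) == 0xfc0) ? data + 64 : data;
    }

    int main(void)
    {
        printf("%#lx -> %#lx\n", 0x1000fc0UL,
               (unsigned long)fixup_rx_addr(0x1000fc0));
        printf("%#lx -> %#lx\n", 0x1000f00UL,
               (unsigned long)fixup_rx_addr(0x1000f00));
        return 0;
    }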
+diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
+index 5715b9ab2712e..e7137b468f5bc 100644
+--- a/drivers/net/ethernet/cortina/gemini.c
++++ b/drivers/net/ethernet/cortina/gemini.c
+@@ -432,8 +432,8 @@ static const struct gmac_max_framelen gmac_maxlens[] = {
+ .val = CONFIG0_MAXLEN_1536,
+ },
+ {
+- .max_l3_len = 1542,
+- .val = CONFIG0_MAXLEN_1542,
++ .max_l3_len = 1548,
++ .val = CONFIG0_MAXLEN_1548,
+ },
+ {
+ .max_l3_len = 9212,
+@@ -1145,6 +1145,7 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
+ dma_addr_t mapping;
+ unsigned short mtu;
+ void *buffer;
++ int ret;
+
+ mtu = ETH_HLEN;
+ mtu += netdev->mtu;
+@@ -1159,9 +1160,30 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
+ word3 |= mtu;
+ }
+
+- if (skb->ip_summed != CHECKSUM_NONE) {
++ if (skb->len >= ETH_FRAME_LEN) {
++ /* Hardware offloaded checksumming isn't working on frames
++ * bigger than 1514 bytes. A hypothesis about this is that the
++ * checksum buffer is only 1518 bytes, so when the frames get
++ * bigger they get truncated, or the last few bytes get
++ * overwritten by the FCS.
++ *
++ * Just use software checksumming and bypass on bigger frames.
++ */
++ if (skb->ip_summed == CHECKSUM_PARTIAL) {
++ ret = skb_checksum_help(skb);
++ if (ret)
++ return ret;
++ }
++ word1 |= TSS_BYPASS_BIT;
++ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ int tcp = 0;
+
++		/* We do not switch off the checksumming on non-TCP/UDP
++		 * frames: as tests have shown, the checksumming engine
++		 * is smart enough to see that a frame is not actually TCP
++		 * or UDP and just passes it through without any changes
++		 * to the frame.
++		 */
+ if (skb->protocol == htons(ETH_P_IP)) {
+ word1 |= TSS_IP_CHKSUM_BIT;
+ tcp = ip_hdr(skb)->protocol == IPPROTO_TCP;
+@@ -1978,15 +2000,6 @@ static int gmac_change_mtu(struct net_device *netdev, int new_mtu)
+ return 0;
+ }
+
+-static netdev_features_t gmac_fix_features(struct net_device *netdev,
+- netdev_features_t features)
+-{
+- if (netdev->mtu + ETH_HLEN + VLAN_HLEN > MTU_SIZE_BIT_MASK)
+- features &= ~GMAC_OFFLOAD_FEATURES;
+-
+- return features;
+-}
+-
+ static int gmac_set_features(struct net_device *netdev,
+ netdev_features_t features)
+ {
+@@ -2212,7 +2225,6 @@ static const struct net_device_ops gmac_351x_ops = {
+ .ndo_set_mac_address = gmac_set_mac_address,
+ .ndo_get_stats64 = gmac_get_stats64,
+ .ndo_change_mtu = gmac_change_mtu,
+- .ndo_fix_features = gmac_fix_features,
+ .ndo_set_features = gmac_set_features,
+ };
+
+@@ -2464,11 +2476,12 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
+
+ netdev->hw_features = GMAC_OFFLOAD_FEATURES;
+ netdev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO;
+- /* We can handle jumbo frames up to 10236 bytes so, let's accept
+- * payloads of 10236 bytes minus VLAN and ethernet header
++ /* We can receive jumbo frames up to 10236 bytes but only
++	 * transmit 2047 bytes, so let's accept payloads of 2047
++ * bytes minus VLAN and ethernet header
+ */
+ netdev->min_mtu = ETH_MIN_MTU;
+- netdev->max_mtu = 10236 - VLAN_ETH_HLEN;
++ netdev->max_mtu = MTU_SIZE_BIT_MASK - VLAN_ETH_HLEN;
+
+ port->freeq_refill = 0;
+ netif_napi_add(netdev, &port->napi, gmac_napi_poll);
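Frames at or above ETH_FRAME_LEN are now checksummed in software via skb_checksum_help() with the hardware bypass bit set, since the engine's checksum buffer appears unable to handle frames past roughly 1518 bytes. The decision, distilled (enum names are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    #define ETH_FRAME_LEN 1514

    enum csum { CSUM_NONE, CSUM_PARTIAL };

    /* Big frames: checksum in software and set the hardware bypass bit. */
    static bool needs_sw_csum(unsigned int len, enum csum state)
    {
        return len >= ETH_FRAME_LEN && state == CSUM_PARTIAL;
    }

    int main(void)
    {
        printf("1400 partial: %d\n", needs_sw_csum(1400, CSUM_PARTIAL)); /* 0 */
        printf("1514 partial: %d\n", needs_sw_csum(1514, CSUM_PARTIAL)); /* 1 */
        return 0;
    }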
+diff --git a/drivers/net/ethernet/cortina/gemini.h b/drivers/net/ethernet/cortina/gemini.h
+index 9fdf77d5eb374..24bb989981f23 100644
+--- a/drivers/net/ethernet/cortina/gemini.h
++++ b/drivers/net/ethernet/cortina/gemini.h
+@@ -502,7 +502,7 @@ union gmac_txdesc_3 {
+ #define SOF_BIT 0x80000000
+ #define EOF_BIT 0x40000000
+ #define EOFIE_BIT BIT(29)
+-#define MTU_SIZE_BIT_MASK 0x1fff
++#define MTU_SIZE_BIT_MASK 0x7ff /* Max MTU 2047 bytes */
+
+ /* GMAC Tx Descriptor */
+ struct gmac_txdesc {
+@@ -787,7 +787,7 @@ union gmac_config0 {
+ #define CONFIG0_MAXLEN_1536 0
+ #define CONFIG0_MAXLEN_1518 1
+ #define CONFIG0_MAXLEN_1522 2
+-#define CONFIG0_MAXLEN_1542 3
++#define CONFIG0_MAXLEN_1548 3
+ #define CONFIG0_MAXLEN_9k 4 /* 9212 */
+ #define CONFIG0_MAXLEN_10k 5 /* 10236 */
+ #define CONFIG0_MAXLEN_1518__6 6
+diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
+index 11b29f56aaf9c..b91abe9efb517 100644
+--- a/drivers/net/ethernet/engleder/tsnep.h
++++ b/drivers/net/ethernet/engleder/tsnep.h
+@@ -142,7 +142,7 @@ struct tsnep_rx {
+
+ struct tsnep_queue {
+ struct tsnep_adapter *adapter;
+- char name[IFNAMSIZ + 9];
++ char name[IFNAMSIZ + 16];
+
+ struct tsnep_tx *tx;
+ struct tsnep_rx *rx;
+diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
+index 479156576bc8a..e3fc894fa3f6f 100644
+--- a/drivers/net/ethernet/engleder/tsnep_main.c
++++ b/drivers/net/ethernet/engleder/tsnep_main.c
+@@ -1778,14 +1778,14 @@ static int tsnep_request_irq(struct tsnep_queue *queue, bool first)
+ dev = queue->adapter;
+ } else {
+ if (queue->tx && queue->rx)
+- sprintf(queue->name, "%s-txrx-%d", name,
+- queue->rx->queue_index);
++ snprintf(queue->name, sizeof(queue->name), "%s-txrx-%d",
++ name, queue->rx->queue_index);
+ else if (queue->tx)
+- sprintf(queue->name, "%s-tx-%d", name,
+- queue->tx->queue_index);
++ snprintf(queue->name, sizeof(queue->name), "%s-tx-%d",
++ name, queue->tx->queue_index);
+ else
+- sprintf(queue->name, "%s-rx-%d", name,
+- queue->rx->queue_index);
++ snprintf(queue->name, sizeof(queue->name), "%s-rx-%d",
++ name, queue->rx->queue_index);
+ handler = tsnep_irq_txrx;
+ dev = queue;
+ }
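The tsnep fix is twofold: the name buffer grows to IFNAMSIZ + 16 so the worst-case "%s-txrx-%d" fits, and the unbounded sprintf() calls become snprintf(), which truncates instead of overrunning. The sizing in isolation:

    #include <stdio.h>

    #define IFNAMSIZ 16

    int main(void)
    {
        char name[IFNAMSIZ + 16];

        /* snprintf() bounds the write; sprintf() would trust the format. */
        snprintf(name, sizeof(name), "%s-txrx-%d", "averylongifname", 1234);
        printf("%s\n", name);
        return 0;
    }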
+diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
+index 465a6db5a40a8..79bfa2837a0e6 100644
+--- a/drivers/net/ethernet/google/gve/gve_main.c
++++ b/drivers/net/ethernet/google/gve/gve_main.c
+@@ -255,10 +255,13 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
+ if (block->tx) {
+ if (block->tx->q_num < priv->tx_cfg.num_queues)
+ reschedule |= gve_tx_poll(block, budget);
+- else
++ else if (budget)
+ reschedule |= gve_xdp_poll(block, budget);
+ }
+
++ if (!budget)
++ return 0;
++
+ if (block->rx) {
+ work_done = gve_rx_poll(block, budget);
+ reschedule |= work_done == budget;
+@@ -299,6 +302,9 @@ static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
+ if (block->tx)
+ reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
+
++ if (!budget)
++ return 0;
++
+ if (block->rx) {
+ work_done = gve_rx_poll_dqo(block, budget);
+ reschedule |= work_done == budget;
+diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
+index e84a066aa1a40..73655347902d2 100644
+--- a/drivers/net/ethernet/google/gve/gve_rx.c
++++ b/drivers/net/ethernet/google/gve/gve_rx.c
+@@ -1007,10 +1007,6 @@ int gve_rx_poll(struct gve_notify_block *block, int budget)
+
+ feat = block->napi.dev->features;
+
+- /* If budget is 0, do all the work */
+- if (budget == 0)
+- budget = INT_MAX;
+-
+ if (budget > 0)
+ work_done = gve_clean_rx_done(rx, budget, feat);
+
+diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
+index 6957a865cff37..9f6ffc4a54f0b 100644
+--- a/drivers/net/ethernet/google/gve/gve_tx.c
++++ b/drivers/net/ethernet/google/gve/gve_tx.c
+@@ -925,10 +925,6 @@ bool gve_xdp_poll(struct gve_notify_block *block, int budget)
+ bool repoll;
+ u32 to_do;
+
+- /* If budget is 0, do all the work */
+- if (budget == 0)
+- budget = INT_MAX;
+-
+ /* Find out how much work there is to be done */
+ nic_done = gve_tx_load_event_counter(priv, tx);
+ to_do = min_t(u32, (nic_done - tx->done), budget);
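netpoll invokes NAPI handlers with a budget of 0, meaning "no RX work allowed"; rather than the pollers inflating a zero budget to INT_MAX, the gve poll functions now do TX completion work only and return early. A sketch of the control flow (function and parameter names are illustrative):

    #include <stdio.h>

    /* budget == 0 (netpoll): clean TX only, never touch RX. */
    static int napi_poll(int budget, int *tx_done, int *rx_done)
    {
        *tx_done = 1; /* TX completions don't count against the budget */
        if (!budget) {
            *rx_done = 0;
            return 0;
        }
        *rx_done = budget; /* RX work is bounded by the budget */
        return *rx_done;
    }

    int main(void)
    {
        int tx, rx;

        printf("netpoll: %d\n", napi_poll(0, &tx, &rx));
        printf("normal:  %d\n", napi_poll(64, &tx, &rx));
        return 0;
    }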
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+index 26fb6fefcb9d9..5d1814ed51427 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+@@ -500,11 +500,14 @@ static void hns3_get_coal_info(struct hns3_enet_tqp_vector *tqp_vector,
+ }
+
+ sprintf(result[j++], "%d", i);
+- sprintf(result[j++], "%s", dim_state_str[dim->state]);
++ sprintf(result[j++], "%s", dim->state < ARRAY_SIZE(dim_state_str) ?
++ dim_state_str[dim->state] : "unknown");
+ sprintf(result[j++], "%u", dim->profile_ix);
+- sprintf(result[j++], "%s", dim_cqe_mode_str[dim->mode]);
++ sprintf(result[j++], "%s", dim->mode < ARRAY_SIZE(dim_cqe_mode_str) ?
++ dim_cqe_mode_str[dim->mode] : "unknown");
+ sprintf(result[j++], "%s",
+- dim_tune_stat_str[dim->tune_state]);
++ dim->tune_state < ARRAY_SIZE(dim_tune_stat_str) ?
++ dim_tune_stat_str[dim->tune_state] : "unknown");
+ sprintf(result[j++], "%u", dim->steps_left);
+ sprintf(result[j++], "%u", dim->steps_right);
+ sprintf(result[j++], "%u", dim->tired);
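The hns3 debugfs hunk stops indexing its string tables with unvalidated DIM state values, printing "unknown" for anything past the table. The guard pattern, standalone (the state names here are placeholders, not the driver's):

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const char * const dim_state_str[] = { "start", "in_prog", "apply" };

    /* Never index a string table with an unvalidated enum value. */
    static const char *dim_state_name(unsigned int state)
    {
        return state < ARRAY_SIZE(dim_state_str) ?
               dim_state_str[state] : "unknown";
    }

    int main(void)
    {
        printf("%s\n", dim_state_name(1)); /* in_prog */
        printf("%s\n", dim_state_name(7)); /* unknown */
        return 0;
    }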
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 71a2ec03f2b38..f644210afb70a 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -5139,7 +5139,7 @@ static int hns3_init_mac_addr(struct net_device *netdev)
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
+ struct hnae3_handle *h = priv->ae_handle;
+- u8 mac_addr_temp[ETH_ALEN];
++ u8 mac_addr_temp[ETH_ALEN] = {0};
+ int ret = 0;
+
+ if (h->ae_algo->ops->get_mac_addr)
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index ed6cf59853bf6..0f868605300a2 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -61,6 +61,7 @@ static void hclge_sync_fd_table(struct hclge_dev *hdev);
+ static void hclge_update_fec_stats(struct hclge_dev *hdev);
+ static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
+ int wait_cnt);
++static int hclge_update_port_info(struct hclge_dev *hdev);
+
+ static struct hnae3_ae_algo ae_algo;
+
+@@ -3043,6 +3044,9 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
+
+ if (state != hdev->hw.mac.link) {
+ hdev->hw.mac.link = state;
++ if (state == HCLGE_LINK_STATUS_UP)
++ hclge_update_port_info(hdev);
++
+ client->ops->link_status_change(handle, state);
+ hclge_config_mac_tnl_int(hdev, state);
+ if (rclient && rclient->ops->link_status_change)
+@@ -10026,8 +10030,6 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_dev *hdev = vport->back;
+
+- mutex_lock(&hdev->vport_lock);
+-
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ if (vlan->vlan_id == vlan_id) {
+ if (is_write_tbl && vlan->hd_tbl_status)
+@@ -10042,8 +10044,6 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
+ break;
+ }
+ }
+-
+- mutex_unlock(&hdev->vport_lock);
+ }
+
+ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
+@@ -10452,11 +10452,16 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
+ * handle mailbox. Just record the vlan id, and remove it after
+ * reset finished.
+ */
++ mutex_lock(&hdev->vport_lock);
+ if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
+ set_bit(vlan_id, vport->vlan_del_fail_bmap);
++ mutex_unlock(&hdev->vport_lock);
+ return -EBUSY;
++ } else if (!is_kill && test_bit(vlan_id, vport->vlan_del_fail_bmap)) {
++ clear_bit(vlan_id, vport->vlan_del_fail_bmap);
+ }
++ mutex_unlock(&hdev->vport_lock);
+
+ /* when port base vlan enabled, we use port base vlan as the vlan
+ * filter entry. In this case, we don't update vlan filter table
+@@ -10471,17 +10476,22 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
+ }
+
+ if (!ret) {
+- if (!is_kill)
++ if (!is_kill) {
+ hclge_add_vport_vlan_table(vport, vlan_id,
+ writen_to_tbl);
+- else if (is_kill && vlan_id != 0)
++ } else if (is_kill && vlan_id != 0) {
++ mutex_lock(&hdev->vport_lock);
+ hclge_rm_vport_vlan_table(vport, vlan_id, false);
++ mutex_unlock(&hdev->vport_lock);
++ }
+ } else if (is_kill) {
+		/* when removing the hw vlan filter failed, record the vlan id,
+		 * and try to remove it from hw later, to be consistent
+		 * with the stack
+ */
++ mutex_lock(&hdev->vport_lock);
+ set_bit(vlan_id, vport->vlan_del_fail_bmap);
++ mutex_unlock(&hdev->vport_lock);
+ }
+
+ hclge_set_vport_vlan_fltr_change(vport);
+@@ -10521,6 +10531,7 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
+ int i, ret, sync_cnt = 0;
+ u16 vlan_id;
+
++ mutex_lock(&hdev->vport_lock);
+ /* start from vport 1 for PF is always alive */
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+@@ -10531,21 +10542,26 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id, vlan_id,
+ true);
+- if (ret && ret != -EINVAL)
++ if (ret && ret != -EINVAL) {
++ mutex_unlock(&hdev->vport_lock);
+ return;
++ }
+
+ clear_bit(vlan_id, vport->vlan_del_fail_bmap);
+ hclge_rm_vport_vlan_table(vport, vlan_id, false);
+ hclge_set_vport_vlan_fltr_change(vport);
+
+ sync_cnt++;
+- if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
++ if (sync_cnt >= HCLGE_MAX_SYNC_COUNT) {
++ mutex_unlock(&hdev->vport_lock);
+ return;
++ }
+
+ vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
+ VLAN_N_VID);
+ }
+ }
++ mutex_unlock(&hdev->vport_lock);
+
+ hclge_sync_vlan_fltr_state(hdev);
+ }
+@@ -11652,6 +11668,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+ goto err_msi_irq_uninit;
+
+ if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
++ clear_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
+ if (hnae3_dev_phy_imp_supported(hdev))
+ ret = hclge_update_tp_port_info(hdev);
+ else
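
Editor's note: the hclge hunks above move `vport_lock` out of `hclge_rm_vport_vlan_table()` into its callers, and park failed deletions in `vlan_del_fail_bmap` for the periodic sync task to retry. A minimal sketch of that deferred-delete pattern, assuming a hypothetical `struct vlan_dev` and `hw_vlan_del()` device op:

#include <linux/bitmap.h>
#include <linux/if_vlan.h>	/* VLAN_N_VID */
#include <linux/mutex.h>

struct vlan_dev {
    DECLARE_BITMAP(del_fail_bmap, VLAN_N_VID);
    struct mutex lock;	/* one lock covers bitmap and table updates */
};

int hw_vlan_del(struct vlan_dev *dev, unsigned int vid);	/* hypothetical */

/* A delete that cannot be issued right now is parked, not lost. */
static void vlan_del_deferred(struct vlan_dev *dev, u16 vid)
{
    mutex_lock(&dev->lock);
    set_bit(vid, dev->del_fail_bmap);
    mutex_unlock(&dev->lock);
}

/* Periodic sync: retry parked deletions, keeping the bit on failure. */
static void vlan_sync_deferred(struct vlan_dev *dev)
{
    unsigned int vid;

    mutex_lock(&dev->lock);
    for_each_set_bit(vid, dev->del_fail_bmap, VLAN_N_VID) {
        if (hw_vlan_del(dev, vid))
            break;		/* try again on the next pass */
        clear_bit(vid, dev->del_fail_bmap);
    }
    mutex_unlock(&dev->lock);
}
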
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index a4d68fb216fb9..0aa9beefd1c7e 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -1206,6 +1206,8 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
+ test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
+ set_bit(vlan_id, hdev->vlan_del_fail_bmap);
+ return -EBUSY;
++ } else if (!is_kill && test_bit(vlan_id, hdev->vlan_del_fail_bmap)) {
++ clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
+ }
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
+@@ -1233,20 +1235,25 @@ static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
+ int ret, sync_cnt = 0;
+ u16 vlan_id;
+
++ if (bitmap_empty(hdev->vlan_del_fail_bmap, VLAN_N_VID))
++ return;
++
++ rtnl_lock();
+ vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
+ while (vlan_id != VLAN_N_VID) {
+ ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
+ vlan_id, true);
+ if (ret)
+- return;
++ break;
+
+ clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
+ sync_cnt++;
+ if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
+- return;
++ break;
+
+ vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
+ }
++ rtnl_unlock();
+ }
+
+ static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+@@ -1974,8 +1981,18 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
+ return HCLGEVF_VECTOR0_EVENT_OTHER;
+ }
+
++static void hclgevf_reset_timer(struct timer_list *t)
++{
++ struct hclgevf_dev *hdev = from_timer(hdev, t, reset_timer);
++
++ hclgevf_clear_event_cause(hdev, HCLGEVF_VECTOR0_EVENT_RST);
++ hclgevf_reset_task_schedule(hdev);
++}
++
+ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
+ {
++#define HCLGEVF_RESET_DELAY 5
++
+ enum hclgevf_evt_cause event_cause;
+ struct hclgevf_dev *hdev = data;
+ u32 clearval;
+@@ -1987,7 +2004,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
+
+ switch (event_cause) {
+ case HCLGEVF_VECTOR0_EVENT_RST:
+- hclgevf_reset_task_schedule(hdev);
++ mod_timer(&hdev->reset_timer,
++ jiffies + msecs_to_jiffies(HCLGEVF_RESET_DELAY));
+ break;
+ case HCLGEVF_VECTOR0_EVENT_MBX:
+ hclgevf_mbx_handler(hdev);
+@@ -2930,6 +2948,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
+ HCLGEVF_DRIVER_NAME);
+
+ hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
++ timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
+
+ return 0;
+
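
Editor's note: the VF side now defers reset handling from the misc IRQ into a timer (`HCLGEVF_RESET_DELAY` ms) instead of scheduling the reset task inline. A sketch of that defer-via-timer idiom, with hypothetical `struct my_dev`/`schedule_reset()` names:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_dev {
    struct timer_list reset_timer;
};

void schedule_reset(struct my_dev *dev);	/* hypothetical follow-up work */

static void my_reset_timer_fn(struct timer_list *t)
{
    struct my_dev *dev = from_timer(dev, t, reset_timer);

    schedule_reset(dev);
}

static void my_dev_init(struct my_dev *dev)
{
    timer_setup(&dev->reset_timer, my_reset_timer_fn, 0);
}

static void my_irq_saw_reset(struct my_dev *dev)
{
    /* defer: re-arming during a burst simply pushes the deadline out */
    mod_timer(&dev->reset_timer, jiffies + msecs_to_jiffies(5));
}
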
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+index 81c16b8c8da29..a73f2bf3a56a6 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+@@ -219,6 +219,7 @@ struct hclgevf_dev {
+ enum hnae3_reset_type reset_level;
+ unsigned long reset_pending;
+ enum hnae3_reset_type reset_type;
++ struct timer_list reset_timer;
+
+ #define HCLGEVF_RESET_REQUESTED 0
+ #define HCLGEVF_RESET_PENDING 1
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+index bbf7b14079de3..85c2a634c8f96 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+@@ -63,6 +63,9 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
+ i++;
+ }
+
++ /* ensure additional_info will be seen after received_resp */
++ smp_rmb();
++
+ if (i >= HCLGEVF_MAX_TRY_TIMES) {
+ dev_err(&hdev->pdev->dev,
+ "VF could not get mbx(%u,%u) resp(=%d) from PF in %d tries\n",
+@@ -178,6 +181,10 @@ static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev,
+ resp->resp_status = hclgevf_resp_to_errno(resp_status);
+ memcpy(resp->additional_info, req->msg.resp_data,
+ HCLGE_MBX_MAX_RESP_DATA_SIZE * sizeof(u8));
++
++ /* ensure additional_info will be seen before setting received_resp */
++ smp_wmb();
++
+ if (match_id) {
+ /* If match_id is not zero, it means PF support match_id.
+ * if the match_id is right, VF get the right response, or
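
Editor's note: the smp_wmb()/smp_rmb() pair added above is the classic publish/consume ordering: the writer must make `additional_info` globally visible before `received_resp`, and the polling reader must order its loads the same way. A generic sketch of the pairing (the names here are illustrative, not the driver's):

#include <asm/barrier.h>	/* smp_wmb(), smp_rmb() */
#include <linux/compiler.h>	/* READ_ONCE(), WRITE_ONCE() */
#include <linux/string.h>
#include <linux/types.h>

struct resp {
    u8 payload[16];
    bool received;
};

/* Writer: payload first, barrier, then the flag. */
static void resp_publish(struct resp *r, const u8 *data, size_t len)
{
    memcpy(r->payload, data, len);
    smp_wmb();			/* order payload before flag */
    WRITE_ONCE(r->received, true);
}

/* Reader: flag first, barrier, then the payload. */
static bool resp_consume(struct resp *r, u8 *out, size_t len)
{
    if (!READ_ONCE(r->received))
        return false;
    smp_rmb();			/* pairs with smp_wmb() above */
    memcpy(out, r->payload, len);
    return true;
}
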
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index acf4f6ba73a6f..f4692a8726b1c 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -4790,14 +4790,17 @@ static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
+ u8 *data)
+ {
+ if (sset == ETH_SS_STATS) {
++ struct mvneta_port *pp = netdev_priv(netdev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ mvneta_statistics[i].name, ETH_GSTRING_LEN);
+
+- data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics);
+- page_pool_ethtool_stats_get_strings(data);
++ if (!pp->bm_priv) {
++ data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics);
++ page_pool_ethtool_stats_get_strings(data);
++ }
+ }
+ }
+
+@@ -4915,8 +4918,10 @@ static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data)
+ struct page_pool_stats stats = {};
+ int i;
+
+- for (i = 0; i < rxq_number; i++)
+- page_pool_get_stats(pp->rxqs[i].page_pool, &stats);
++ for (i = 0; i < rxq_number; i++) {
++ if (pp->rxqs[i].page_pool)
++ page_pool_get_stats(pp->rxqs[i].page_pool, &stats);
++ }
+
+ page_pool_ethtool_stats_get(data, &stats);
+ }
+@@ -4932,14 +4937,21 @@ static void mvneta_ethtool_get_stats(struct net_device *dev,
+ for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
+ *data++ = pp->ethtool_stats[i];
+
+- mvneta_ethtool_pp_stats(pp, data);
++ if (!pp->bm_priv)
++ mvneta_ethtool_pp_stats(pp, data);
+ }
+
+ static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
+ {
+- if (sset == ETH_SS_STATS)
+- return ARRAY_SIZE(mvneta_statistics) +
+- page_pool_ethtool_stats_get_count();
++ if (sset == ETH_SS_STATS) {
++ int count = ARRAY_SIZE(mvneta_statistics);
++ struct mvneta_port *pp = netdev_priv(dev);
++
++ if (!pp->bm_priv)
++ count += page_pool_ethtool_stats_get_count();
++
++ return count;
++ }
+
+ return -EOPNOTSUPP;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
+index 0107e4e73bb06..415840c3ef84f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
+@@ -18,6 +18,7 @@ void mlx5e_reporter_tx_create(struct mlx5e_priv *priv);
+ void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv);
+ void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq);
+ int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq);
++void mlx5e_reporter_tx_ptpsq_unhealthy(struct mlx5e_ptpsq *ptpsq);
+
+ int mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg);
+ int mlx5e_health_cq_common_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+index b0b429a0321ed..af3928eddafd1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+@@ -2,9 +2,12 @@
+ // Copyright (c) 2020 Mellanox Technologies
+
+ #include "en/ptp.h"
++#include "en/health.h"
+ #include "en/txrx.h"
+ #include "en/params.h"
+ #include "en/fs_tt_redirect.h"
++#include <linux/list.h>
++#include <linux/spinlock.h>
+
+ struct mlx5e_ptp_fs {
+ struct mlx5_flow_handle *l2_rule;
+@@ -19,6 +22,48 @@ struct mlx5e_ptp_params {
+ struct mlx5e_rq_param rq_param;
+ };
+
++struct mlx5e_ptp_port_ts_cqe_tracker {
++ u8 metadata_id;
++ bool inuse : 1;
++ struct list_head entry;
++};
++
++struct mlx5e_ptp_port_ts_cqe_list {
++ struct mlx5e_ptp_port_ts_cqe_tracker *nodes;
++ struct list_head tracker_list_head;
++ /* Sync list operations in xmit and napi_poll contexts */
++ spinlock_t tracker_list_lock;
++};
++
++static inline void
++mlx5e_ptp_port_ts_cqe_list_add(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metadata)
++{
++ struct mlx5e_ptp_port_ts_cqe_tracker *tracker = &list->nodes[metadata];
++
++ WARN_ON_ONCE(tracker->inuse);
++ tracker->inuse = true;
++ spin_lock(&list->tracker_list_lock);
++ list_add_tail(&tracker->entry, &list->tracker_list_head);
++ spin_unlock(&list->tracker_list_lock);
++}
++
++static void
++mlx5e_ptp_port_ts_cqe_list_remove(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metadata)
++{
++ struct mlx5e_ptp_port_ts_cqe_tracker *tracker = &list->nodes[metadata];
++
++ WARN_ON_ONCE(!tracker->inuse);
++ tracker->inuse = false;
++ spin_lock(&list->tracker_list_lock);
++ list_del(&tracker->entry);
++ spin_unlock(&list->tracker_list_lock);
++}
++
++void mlx5e_ptpsq_track_metadata(struct mlx5e_ptpsq *ptpsq, u8 metadata)
++{
++ mlx5e_ptp_port_ts_cqe_list_add(ptpsq->ts_cqe_pending_list, metadata);
++}
++
+ struct mlx5e_skb_cb_hwtstamp {
+ ktime_t cqe_hwtstamp;
+ ktime_t port_hwtstamp;
+@@ -79,84 +124,113 @@ void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
+ memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
+ }
+
+-#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)
+-
+-static bool mlx5e_ptp_ts_cqe_drop(struct mlx5e_ptpsq *ptpsq, u16 skb_ci, u16 skb_id)
++static struct sk_buff *
++mlx5e_ptp_metadata_map_lookup(struct mlx5e_ptp_metadata_map *map, u16 metadata)
+ {
+- return (ptpsq->ts_cqe_ctr_mask && (skb_ci != skb_id));
++ return map->data[metadata];
+ }
+
+-static bool mlx5e_ptp_ts_cqe_ooo(struct mlx5e_ptpsq *ptpsq, u16 skb_id)
++static struct sk_buff *
++mlx5e_ptp_metadata_map_remove(struct mlx5e_ptp_metadata_map *map, u16 metadata)
+ {
+- u16 skb_ci = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
+- u16 skb_pi = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_pc);
++ struct sk_buff *skb;
+
+- if (PTP_WQE_CTR2IDX(skb_id - skb_ci) >= PTP_WQE_CTR2IDX(skb_pi - skb_ci))
+- return true;
++ skb = map->data[metadata];
++ map->data[metadata] = NULL;
+
+- return false;
++ return skb;
+ }
+
+-static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_ci,
+- u16 skb_id, int budget)
++static bool mlx5e_ptp_metadata_map_unhealthy(struct mlx5e_ptp_metadata_map *map)
+ {
+- struct skb_shared_hwtstamps hwts = {};
+- struct sk_buff *skb;
++ /* Consider the map unhealthy once more than 15/16 of its capacity (size * 15 / 2^4) cannot be reclaimed. */
++ return map->undelivered_counter > (map->capacity >> 4) * 15;
++}
+
+- ptpsq->cq_stats->resync_event++;
++static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
++ ktime_t port_tstamp)
++{
++ struct mlx5e_ptp_port_ts_cqe_list *cqe_list = ptpsq->ts_cqe_pending_list;
++ ktime_t timeout = ns_to_ktime(MLX5E_PTP_TS_CQE_UNDELIVERED_TIMEOUT);
++ struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map;
++ struct mlx5e_ptp_port_ts_cqe_tracker *pos, *n;
+
+- while (skb_ci != skb_id) {
+- skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
+- hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp;
+- skb_tstamp_tx(skb, &hwts);
+- ptpsq->cq_stats->resync_cqe++;
+- napi_consume_skb(skb, budget);
+- skb_ci = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
++ spin_lock(&cqe_list->tracker_list_lock);
++ list_for_each_entry_safe(pos, n, &cqe_list->tracker_list_head, entry) {
++ struct sk_buff *skb =
++ mlx5e_ptp_metadata_map_lookup(metadata_map, pos->metadata_id);
++ ktime_t dma_tstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp;
++
++ if (!dma_tstamp ||
++ ktime_after(ktime_add(dma_tstamp, timeout), port_tstamp))
++ break;
++
++ metadata_map->undelivered_counter++;
++ WARN_ON_ONCE(!pos->inuse);
++ pos->inuse = false;
++ list_del(&pos->entry);
+ }
++ spin_unlock(&cqe_list->tracker_list_lock);
+ }
+
++#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)
++
+ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
+ struct mlx5_cqe64 *cqe,
++ u8 *md_buff,
++ u8 *md_buff_sz,
+ int budget)
+ {
+- u16 skb_id = PTP_WQE_CTR2IDX(be16_to_cpu(cqe->wqe_counter));
+- u16 skb_ci = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
++ struct mlx5e_ptp_port_ts_cqe_list *pending_cqe_list = ptpsq->ts_cqe_pending_list;
++ u8 metadata_id = PTP_WQE_CTR2IDX(be16_to_cpu(cqe->wqe_counter));
++ bool is_err_cqe = !!MLX5E_RX_ERR_CQE(cqe);
+ struct mlx5e_txqsq *sq = &ptpsq->txqsq;
+ struct sk_buff *skb;
+ ktime_t hwtstamp;
+
+- if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+- skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
+- ptpsq->cq_stats->err_cqe++;
+- goto out;
++ if (likely(pending_cqe_list->nodes[metadata_id].inuse)) {
++ mlx5e_ptp_port_ts_cqe_list_remove(pending_cqe_list, metadata_id);
++ } else {
++ /* Reclaim space in the unlikely event CQE was delivered after
++ * marking it late.
++ */
++ ptpsq->metadata_map.undelivered_counter--;
++ ptpsq->cq_stats->late_cqe++;
+ }
+
+- if (mlx5e_ptp_ts_cqe_drop(ptpsq, skb_ci, skb_id)) {
+- if (mlx5e_ptp_ts_cqe_ooo(ptpsq, skb_id)) {
+- /* already handled by a previous resync */
+- ptpsq->cq_stats->ooo_cqe_drop++;
+- return;
+- }
+- mlx5e_ptp_skb_fifo_ts_cqe_resync(ptpsq, skb_ci, skb_id, budget);
++ skb = mlx5e_ptp_metadata_map_remove(&ptpsq->metadata_map, metadata_id);
++
++ if (unlikely(is_err_cqe)) {
++ ptpsq->cq_stats->err_cqe++;
++ goto out;
+ }
+
+- skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
+ hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
+ mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
+ hwtstamp, ptpsq->cq_stats);
+ ptpsq->cq_stats->cqe++;
+
++ mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp);
+ out:
+ napi_consume_skb(skb, budget);
++ md_buff[(*md_buff_sz)++] = metadata_id; /* increment the count, not the pointer */
++ if (unlikely(mlx5e_ptp_metadata_map_unhealthy(&ptpsq->metadata_map)) &&
++ !test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
++ queue_work(ptpsq->txqsq.priv->wq, &ptpsq->report_unhealthy_work);
+ }
+
+-static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
++static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int napi_budget)
+ {
+ struct mlx5e_ptpsq *ptpsq = container_of(cq, struct mlx5e_ptpsq, ts_cq);
+- struct mlx5_cqwq *cqwq = &cq->wq;
++ int budget = min(napi_budget, MLX5E_TX_CQ_POLL_BUDGET);
++ u8 metadata_buff[MLX5E_TX_CQ_POLL_BUDGET];
++ u8 metadata_buff_sz = 0;
++ struct mlx5_cqwq *cqwq;
+ struct mlx5_cqe64 *cqe;
+ int work_done = 0;
+
++ cqwq = &cq->wq;
++
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &ptpsq->txqsq.state)))
+ return false;
+
+@@ -167,7 +241,8 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
+ do {
+ mlx5_cqwq_pop(cqwq);
+
+- mlx5e_ptp_handle_ts_cqe(ptpsq, cqe, budget);
++ mlx5e_ptp_handle_ts_cqe(ptpsq, cqe,
++ metadata_buff, &metadata_buff_sz, napi_budget);
+ } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
+
+ mlx5_cqwq_update_db_record(cqwq);
+@@ -175,6 +250,10 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
++ while (metadata_buff_sz > 0)
++ mlx5e_ptp_metadata_fifo_push(&ptpsq->metadata_freelist,
++ metadata_buff[--metadata_buff_sz]);
++
+ mlx5e_txqsq_wake(&ptpsq->txqsq);
+
+ return work_done == budget;
+@@ -291,36 +370,86 @@ static void mlx5e_ptp_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
+
+ static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
+ {
+- int wq_sz = mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq);
+- struct mlx5_core_dev *mdev = ptpsq->txqsq.mdev;
++ struct mlx5e_ptp_metadata_fifo *metadata_freelist = &ptpsq->metadata_freelist;
++ struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map;
++ struct mlx5e_ptp_port_ts_cqe_list *cqe_list;
++ int db_sz;
++ int md;
+
+- ptpsq->skb_fifo.fifo = kvzalloc_node(array_size(wq_sz, sizeof(*ptpsq->skb_fifo.fifo)),
+- GFP_KERNEL, numa);
+- if (!ptpsq->skb_fifo.fifo)
++ cqe_list = kvzalloc_node(sizeof(*ptpsq->ts_cqe_pending_list), GFP_KERNEL, numa);
++ if (!cqe_list)
+ return -ENOMEM;
++ ptpsq->ts_cqe_pending_list = cqe_list;
++
++ db_sz = min_t(u32, mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq),
++ 1 << MLX5_CAP_GEN_2(ptpsq->txqsq.mdev,
++ ts_cqe_metadata_size2wqe_counter));
++ ptpsq->ts_cqe_ctr_mask = db_sz - 1;
++
++ cqe_list->nodes = kvzalloc_node(array_size(db_sz, sizeof(*cqe_list->nodes)),
++ GFP_KERNEL, numa);
++ if (!cqe_list->nodes)
++ goto free_cqe_list;
++ INIT_LIST_HEAD(&cqe_list->tracker_list_head);
++ spin_lock_init(&cqe_list->tracker_list_lock);
++
++ metadata_freelist->data =
++ kvzalloc_node(array_size(db_sz, sizeof(*metadata_freelist->data)),
++ GFP_KERNEL, numa);
++ if (!metadata_freelist->data)
++ goto free_cqe_list_nodes;
++ metadata_freelist->mask = ptpsq->ts_cqe_ctr_mask;
++
++ for (md = 0; md < db_sz; ++md) {
++ cqe_list->nodes[md].metadata_id = md;
++ metadata_freelist->data[md] = md;
++ }
++ metadata_freelist->pc = db_sz;
++
++ metadata_map->data =
++ kvzalloc_node(array_size(db_sz, sizeof(*metadata_map->data)),
++ GFP_KERNEL, numa);
++ if (!metadata_map->data)
++ goto free_metadata_freelist;
++ metadata_map->capacity = db_sz;
+
+- ptpsq->skb_fifo.pc = &ptpsq->skb_fifo_pc;
+- ptpsq->skb_fifo.cc = &ptpsq->skb_fifo_cc;
+- ptpsq->skb_fifo.mask = wq_sz - 1;
+- if (MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter))
+- ptpsq->ts_cqe_ctr_mask =
+- (1 << MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter)) - 1;
+ return 0;
++
++free_metadata_freelist:
++ kvfree(metadata_freelist->data);
++free_cqe_list_nodes:
++ kvfree(cqe_list->nodes);
++free_cqe_list:
++ kvfree(cqe_list);
++ return -ENOMEM;
+ }
+
+-static void mlx5e_ptp_drain_skb_fifo(struct mlx5e_skb_fifo *skb_fifo)
++static void mlx5e_ptp_drain_metadata_map(struct mlx5e_ptp_metadata_map *map)
+ {
+- while (*skb_fifo->pc != *skb_fifo->cc) {
+- struct sk_buff *skb = mlx5e_skb_fifo_pop(skb_fifo);
++ int idx;
++
++ for (idx = 0; idx < map->capacity; ++idx) {
++ struct sk_buff *skb = map->data[idx];
+
+ dev_kfree_skb_any(skb);
+ }
+ }
+
+-static void mlx5e_ptp_free_traffic_db(struct mlx5e_skb_fifo *skb_fifo)
++static void mlx5e_ptp_free_traffic_db(struct mlx5e_ptpsq *ptpsq)
+ {
+- mlx5e_ptp_drain_skb_fifo(skb_fifo);
+- kvfree(skb_fifo->fifo);
++ mlx5e_ptp_drain_metadata_map(&ptpsq->metadata_map);
++ kvfree(ptpsq->metadata_map.data);
++ kvfree(ptpsq->metadata_freelist.data);
++ kvfree(ptpsq->ts_cqe_pending_list->nodes);
++ kvfree(ptpsq->ts_cqe_pending_list);
++}
++
++static void mlx5e_ptpsq_unhealthy_work(struct work_struct *work)
++{
++ struct mlx5e_ptpsq *ptpsq =
++ container_of(work, struct mlx5e_ptpsq, report_unhealthy_work);
++
++ mlx5e_reporter_tx_ptpsq_unhealthy(ptpsq);
+ }
+
+ static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
+@@ -348,11 +477,12 @@ static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
+ if (err)
+ goto err_free_txqsq;
+
+- err = mlx5e_ptp_alloc_traffic_db(ptpsq,
+- dev_to_node(mlx5_core_dma_dev(c->mdev)));
++ err = mlx5e_ptp_alloc_traffic_db(ptpsq, dev_to_node(mlx5_core_dma_dev(c->mdev)));
+ if (err)
+ goto err_free_txqsq;
+
++ INIT_WORK(&ptpsq->report_unhealthy_work, mlx5e_ptpsq_unhealthy_work);
++
+ return 0;
+
+ err_free_txqsq:
+@@ -366,7 +496,9 @@ static void mlx5e_ptp_close_txqsq(struct mlx5e_ptpsq *ptpsq)
+ struct mlx5e_txqsq *sq = &ptpsq->txqsq;
+ struct mlx5_core_dev *mdev = sq->mdev;
+
+- mlx5e_ptp_free_traffic_db(&ptpsq->skb_fifo);
++ if (current_work() != &ptpsq->report_unhealthy_work)
++ cancel_work_sync(&ptpsq->report_unhealthy_work);
++ mlx5e_ptp_free_traffic_db(ptpsq);
+ cancel_work_sync(&sq->recover_work);
+ mlx5e_ptp_destroy_sq(mdev, sq->sqn);
+ mlx5e_free_txqsq_descs(sq);
+@@ -534,7 +666,10 @@ static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
+
+ /* SQ */
+ if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
+- params->log_sq_size = orig->log_sq_size;
++ params->log_sq_size =
++ min(MLX5_CAP_GEN_2(c->mdev, ts_cqe_metadata_size2wqe_counter),
++ MLX5E_PTP_MAX_LOG_SQ_SIZE);
++ params->log_sq_size = min(params->log_sq_size, orig->log_sq_size);
+ mlx5e_ptp_build_sq_param(c->mdev, params, &cparams->txq_sq_param);
+ }
+ /* RQ */
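
Editor's note: `mlx5e_ptp_metadata_fifo_push()`/`_pop()` above rely on the freelist capacity being a power of two no larger than 256: the free-running u8 counters index correctly through `mask & counter` even after wraparound, and emptiness is just counter equality. A standalone sketch of that scheme:

#include <linux/types.h>

struct u8_fifo {
    u8 cc;		/* consumer counter, free-running */
    u8 pc;		/* producer counter, free-running */
    u8 mask;	/* capacity - 1; capacity must be a power of two */
    u8 *data;
};

static void u8_fifo_push(struct u8_fifo *f, u8 v)
{
    f->data[f->mask & f->pc++] = v;
}

static u8 u8_fifo_pop(struct u8_fifo *f)
{
    return f->data[f->mask & f->cc++];
}

static bool u8_fifo_empty(const struct u8_fifo *f)
{
    return f->pc == f->cc;	/* never more than capacity entries queued */
}
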
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+index cc7efde88ac3c..7b700d0f956a8 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+@@ -7,18 +7,38 @@
+ #include "en.h"
+ #include "en_stats.h"
+ #include "en/txrx.h"
++#include <linux/ktime.h>
+ #include <linux/ptp_classify.h>
++#include <linux/time64.h>
++#include <linux/workqueue.h>
+
+ #define MLX5E_PTP_CHANNEL_IX 0
++#define MLX5E_PTP_MAX_LOG_SQ_SIZE (8U)
++#define MLX5E_PTP_TS_CQE_UNDELIVERED_TIMEOUT (1 * NSEC_PER_SEC)
++
++struct mlx5e_ptp_metadata_fifo {
++ u8 cc;
++ u8 pc;
++ u8 mask;
++ u8 *data;
++};
++
++struct mlx5e_ptp_metadata_map {
++ u16 undelivered_counter;
++ u16 capacity;
++ struct sk_buff **data;
++};
+
+ struct mlx5e_ptpsq {
+ struct mlx5e_txqsq txqsq;
+ struct mlx5e_cq ts_cq;
+- u16 skb_fifo_cc;
+- u16 skb_fifo_pc;
+- struct mlx5e_skb_fifo skb_fifo;
+ struct mlx5e_ptp_cq_stats *cq_stats;
+ u16 ts_cqe_ctr_mask;
++
++ struct work_struct report_unhealthy_work;
++ struct mlx5e_ptp_port_ts_cqe_list *ts_cqe_pending_list;
++ struct mlx5e_ptp_metadata_fifo metadata_freelist;
++ struct mlx5e_ptp_metadata_map metadata_map;
+ };
+
+ enum {
+@@ -69,12 +89,35 @@ static inline bool mlx5e_use_ptpsq(struct sk_buff *skb)
+ fk.ports.dst == htons(PTP_EV_PORT));
+ }
+
+-static inline bool mlx5e_ptpsq_fifo_has_room(struct mlx5e_txqsq *sq)
++static inline void mlx5e_ptp_metadata_fifo_push(struct mlx5e_ptp_metadata_fifo *fifo, u8 metadata)
+ {
+- if (!sq->ptpsq)
+- return true;
++ fifo->data[fifo->mask & fifo->pc++] = metadata;
++}
++
++static inline u8
++mlx5e_ptp_metadata_fifo_pop(struct mlx5e_ptp_metadata_fifo *fifo)
++{
++ return fifo->data[fifo->mask & fifo->cc++];
++}
+
+- return mlx5e_skb_fifo_has_room(&sq->ptpsq->skb_fifo);
++static inline void
++mlx5e_ptp_metadata_map_put(struct mlx5e_ptp_metadata_map *map,
++ struct sk_buff *skb, u8 metadata)
++{
++ WARN_ON_ONCE(map->data[metadata]);
++ map->data[metadata] = skb;
++}
++
++static inline bool mlx5e_ptpsq_metadata_freelist_empty(struct mlx5e_ptpsq *ptpsq)
++{
++ struct mlx5e_ptp_metadata_fifo *freelist;
++
++ if (likely(!ptpsq))
++ return false;
++
++ freelist = &ptpsq->metadata_freelist;
++
++ return freelist->pc == freelist->cc;
+ }
+
+ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
+@@ -89,6 +132,8 @@ void mlx5e_ptp_free_rx_fs(struct mlx5e_flow_steering *fs,
+ const struct mlx5e_profile *profile);
+ int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set);
+
++void mlx5e_ptpsq_track_metadata(struct mlx5e_ptpsq *ptpsq, u8 metadata);
++
+ enum {
+ MLX5E_SKB_CB_CQE_HWTSTAMP = BIT(0),
+ MLX5E_SKB_CB_PORT_HWTSTAMP = BIT(1),
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+index e8eea9ffd5eb6..03b119a434bc9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+@@ -702,11 +702,11 @@ static int mlx5e_rx_reporter_dump(struct devlink_health_reporter *reporter,
+
+ void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
+ {
+- char icosq_str[MLX5E_REPORTER_PER_Q_MAX_LEN] = {};
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_icosq *icosq = rq->icosq;
+ struct mlx5e_priv *priv = rq->priv;
+ struct mlx5e_err_ctx err_ctx = {};
++ char icosq_str[32] = {};
+
+ err_ctx.ctx = rq;
+ err_ctx.recover = mlx5e_rx_reporter_timeout_recover;
+@@ -715,7 +715,7 @@ void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
+ if (icosq)
+ snprintf(icosq_str, sizeof(icosq_str), "ICOSQ: 0x%x, ", icosq->sqn);
+ snprintf(err_str, sizeof(err_str),
+- "RX timeout on channel: %d, %sRQ: 0x%x, CQ: 0x%x",
++ "RX timeout on channel: %d, %s RQ: 0x%x, CQ: 0x%x",
+ rq->ix, icosq_str, rq->rqn, rq->cq.mcq.cqn);
+
+ mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+index b35ff289af492..ff8242f67c545 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+@@ -164,6 +164,43 @@ static int mlx5e_tx_reporter_timeout_recover(void *ctx)
+ return err;
+ }
+
++static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
++{
++ struct mlx5e_ptpsq *ptpsq = ctx;
++ struct mlx5e_channels *chs;
++ struct net_device *netdev;
++ struct mlx5e_priv *priv;
++ int carrier_ok;
++ int err;
++
++ if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &ptpsq->txqsq.state))
++ return 0;
++
++ priv = ptpsq->txqsq.priv;
++
++ mutex_lock(&priv->state_lock);
++ chs = &priv->channels;
++ netdev = priv->netdev;
++
++ carrier_ok = netif_carrier_ok(netdev);
++ netif_carrier_off(netdev);
++
++ mlx5e_deactivate_priv_channels(priv);
++
++ mlx5e_ptp_close(chs->ptp);
++ err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp);
++
++ mlx5e_activate_priv_channels(priv);
++
++ /* restore carrier if needed */
++ if (carrier_ok)
++ netif_carrier_on(netdev);
++
++ mutex_unlock(&priv->state_lock);
++
++ return err;
++}
++
+ /* state lock cannot be grabbed within this function.
+ * It can cause a dead lock or a read-after-free.
+ */
+@@ -516,6 +553,15 @@ static int mlx5e_tx_reporter_timeout_dump(struct mlx5e_priv *priv, struct devlin
+ return mlx5e_tx_reporter_dump_sq(priv, fmsg, to_ctx->sq);
+ }
+
++static int mlx5e_tx_reporter_ptpsq_unhealthy_dump(struct mlx5e_priv *priv,
++ struct devlink_fmsg *fmsg,
++ void *ctx)
++{
++ struct mlx5e_ptpsq *ptpsq = ctx;
++
++ return mlx5e_tx_reporter_dump_sq(priv, fmsg, &ptpsq->txqsq);
++}
++
+ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
+ struct devlink_fmsg *fmsg)
+ {
+@@ -621,6 +667,25 @@ int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
+ return to_ctx.status;
+ }
+
++void mlx5e_reporter_tx_ptpsq_unhealthy(struct mlx5e_ptpsq *ptpsq)
++{
++ struct mlx5e_ptp_metadata_map *map = &ptpsq->metadata_map;
++ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
++ struct mlx5e_txqsq *txqsq = &ptpsq->txqsq;
++ struct mlx5e_cq *ts_cq = &ptpsq->ts_cq;
++ struct mlx5e_priv *priv = txqsq->priv;
++ struct mlx5e_err_ctx err_ctx = {};
++
++ err_ctx.ctx = ptpsq;
++ err_ctx.recover = mlx5e_tx_reporter_ptpsq_unhealthy_recover;
++ err_ctx.dump = mlx5e_tx_reporter_ptpsq_unhealthy_dump;
++ snprintf(err_str, sizeof(err_str),
++ "Unhealthy TX port TS queue: %d, SQ: 0x%x, CQ: 0x%x, Undelivered CQEs: %u, Map Capacity: %u",
++ txqsq->ch_ix, txqsq->sqn, ts_cq->mcq.cqn, map->undelivered_counter, map->capacity);
++
++ mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
++}
++
+ static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = {
+ .name = "tx",
+ .recover = mlx5e_tx_reporter_recover,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+index 00a04fdd756f5..668da5c70e63d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+@@ -300,9 +300,6 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
+ if (err)
+ goto destroy_neigh_entry;
+
+- e->encap_size = ipv4_encap_size;
+- e->encap_header = encap_header;
+-
+ if (!(nud_state & NUD_VALID)) {
+ neigh_event_send(attr.n, NULL);
+ /* the encap entry will be made valid on neigh update event
+@@ -322,6 +319,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
+ goto destroy_neigh_entry;
+ }
+
++ e->encap_size = ipv4_encap_size;
++ e->encap_header = encap_header;
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+ mlx5e_route_lookup_ipv4_put(&attr);
+@@ -404,16 +403,12 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
+ if (err)
+ goto free_encap;
+
+- e->encap_size = ipv4_encap_size;
+- kfree(e->encap_header);
+- e->encap_header = encap_header;
+-
+ if (!(nud_state & NUD_VALID)) {
+ neigh_event_send(attr.n, NULL);
+ /* the encap entry will be made valid on neigh update event
+ * and not used before that.
+ */
+- goto release_neigh;
++ goto free_encap;
+ }
+
+ memset(&reformat_params, 0, sizeof(reformat_params));
+@@ -427,6 +422,10 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
+ goto free_encap;
+ }
+
++ e->encap_size = ipv4_encap_size;
++ kfree(e->encap_header);
++ e->encap_header = encap_header;
++
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+ mlx5e_route_lookup_ipv4_put(&attr);
+@@ -568,9 +567,6 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
+ if (err)
+ goto destroy_neigh_entry;
+
+- e->encap_size = ipv6_encap_size;
+- e->encap_header = encap_header;
+-
+ if (!(nud_state & NUD_VALID)) {
+ neigh_event_send(attr.n, NULL);
+ /* the encap entry will be made valid on neigh update event
+@@ -590,6 +586,8 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
+ goto destroy_neigh_entry;
+ }
+
++ e->encap_size = ipv6_encap_size;
++ e->encap_header = encap_header;
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+ mlx5e_route_lookup_ipv6_put(&attr);
+@@ -671,16 +669,12 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
+ if (err)
+ goto free_encap;
+
+- e->encap_size = ipv6_encap_size;
+- kfree(e->encap_header);
+- e->encap_header = encap_header;
+-
+ if (!(nud_state & NUD_VALID)) {
+ neigh_event_send(attr.n, NULL);
+ /* the encap entry will be made valid on neigh update event
+ * and not used before that.
+ */
+- goto release_neigh;
++ goto free_encap;
+ }
+
+ memset(&reformat_params, 0, sizeof(reformat_params));
+@@ -694,6 +688,10 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
+ goto free_encap;
+ }
+
++ e->encap_size = ipv6_encap_size;
++ kfree(e->encap_header);
++ e->encap_header = encap_header;
++
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+ mlx5e_route_lookup_ipv6_put(&attr);
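
Editor's note: both the IPv4 and IPv6 hunks above apply the same ordering fix: `e->encap_size`/`e->encap_header` are committed only after the firmware reformat allocation succeeds, so an error path can no longer leave the entry pointing at memory the caller frees. The pattern in isolation, with a hypothetical `hw_install()` step:

#include <linux/slab.h>

struct obj {
    void *hdr;
    int hdr_len;
};

int hw_install(void *hdr, int len);	/* hypothetical fallible step */

static int obj_update_header(struct obj *o, void *hdr, int len)
{
    int err;

    /* run every fallible step first... */
    err = hw_install(hdr, len);
    if (err) {
        kfree(hdr);
        return err;	/* o untouched: no dangling pointer */
    }

    /* ...then commit the new state in one place */
    kfree(o->hdr);
    o->hdr = hdr;
    o->hdr_len = len;
    return 0;
}
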
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index 27861b68ced57..bd3fabb007c94 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -43,12 +43,17 @@ void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
+ struct ethtool_drvinfo *drvinfo)
+ {
+ struct mlx5_core_dev *mdev = priv->mdev;
++ int count;
+
+ strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+- "%d.%d.%04d (%.16s)",
+- fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev),
+- mdev->board_id);
++ count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++ "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
++ fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
++ if (count == sizeof(drvinfo->fw_version))
++ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++ "%d.%d.%04d", fw_rev_maj(mdev),
++ fw_rev_min(mdev), fw_rev_sub(mdev));
++
+ strscpy(drvinfo->bus_info, dev_name(mdev->device),
+ sizeof(drvinfo->bus_info));
+ }
+@@ -2061,7 +2066,8 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
+ struct mlx5e_params new_params;
+ int err;
+
+- if (!MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
++ if (!MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn) ||
++ !MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter))
+ return -EOPNOTSUPP;
+
+ /* Don't allow changing the PTP state if HTB offload is active, because
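
Editor's note: the drvinfo hunks detect truncation and fall back to a shorter firmware-version string. In general, snprintf() returns the length the untruncated output would have had, so any return value >= the buffer size means truncation (the patch's `count == sizeof(...)` test is the boundary case that matters for this fixed-width ethtool field). A sketch of the general check:

#include <linux/kernel.h>	/* snprintf() */

static void fw_ver_fill(char *buf, size_t sz,
            int maj, int min, int sub, const char *board)
{
    int n;

    n = snprintf(buf, sz, "%d.%d.%04d (%.16s)", maj, min, sub, board);
    if (n >= (int)sz)	/* truncated: retry without the board id */
        snprintf(buf, sz, "%d.%d.%04d", maj, min, sub);
}
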
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+index 0cd44ef190058..87fda65852fb7 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -71,13 +71,17 @@ static void mlx5e_rep_get_drvinfo(struct net_device *dev,
+ {
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
++ int count;
+
+ strscpy(drvinfo->driver, mlx5e_rep_driver_name,
+ sizeof(drvinfo->driver));
+- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+- "%d.%d.%04d (%.16s)",
+- fw_rev_maj(mdev), fw_rev_min(mdev),
+- fw_rev_sub(mdev), mdev->board_id);
++ count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++ "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
++ fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
++ if (count == sizeof(drvinfo->fw_version))
++ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++ "%d.%d.%04d", fw_rev_maj(mdev),
++ fw_rev_min(mdev), fw_rev_sub(mdev));
+ }
+
+ static const struct counter_desc sw_rep_stats_desc[] = {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+index 4d77055abd4be..dfdd357974164 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+@@ -2142,9 +2142,7 @@ static const struct counter_desc ptp_cq_stats_desc[] = {
+ { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
+ { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
+ { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
+- { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_cqe) },
+- { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_event) },
+- { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, ooo_cqe_drop) },
++ { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, late_cqe) },
+ };
+
+ static const struct counter_desc ptp_rq_stats_desc[] = {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+index 67938b4ea1b90..13a07e52ae92b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+@@ -449,9 +449,7 @@ struct mlx5e_ptp_cq_stats {
+ u64 err_cqe;
+ u64 abort;
+ u64 abort_abs_diff_ns;
+- u64 resync_cqe;
+- u64 resync_event;
+- u64 ooo_cqe_drop;
++ u64 late_cqe;
+ };
+
+ struct mlx5e_rep_stats {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index 5797d8607633e..fdef505c4b88f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -3148,7 +3148,7 @@ static struct mlx5_fields fields[] = {
+ OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
+ OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
+- OFFLOAD(IP_DSCP, 16, 0xc00f, ip6, 0, ip_dscp),
++ OFFLOAD(IP_DSCP, 16, 0x0fc0, ip6, 0, ip_dscp),
+
+ OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
+ OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
+@@ -3159,21 +3159,31 @@ static struct mlx5_fields fields[] = {
+ OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
+ };
+
+-static unsigned long mask_to_le(unsigned long mask, int size)
++static u32 mask_field_get(void *mask, struct mlx5_fields *f)
+ {
+- __be32 mask_be32;
+- __be16 mask_be16;
+-
+- if (size == 32) {
+- mask_be32 = (__force __be32)(mask);
+- mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
+- } else if (size == 16) {
+- mask_be32 = (__force __be32)(mask);
+- mask_be16 = *(__be16 *)&mask_be32;
+- mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
++ switch (f->field_bsize) {
++ case 32:
++ return be32_to_cpu(*(__be32 *)mask) & f->field_mask;
++ case 16:
++ return be16_to_cpu(*(__be16 *)mask) & (u16)f->field_mask;
++ default:
++ return *(u8 *)mask & (u8)f->field_mask;
+ }
++}
+
+- return mask;
++static void mask_field_clear(void *mask, struct mlx5_fields *f)
++{
++ switch (f->field_bsize) {
++ case 32:
++ *(__be32 *)mask &= ~cpu_to_be32(f->field_mask);
++ break;
++ case 16:
++ *(__be16 *)mask &= ~cpu_to_be16((u16)f->field_mask);
++ break;
++ default:
++ *(u8 *)mask &= ~(u8)f->field_mask;
++ break;
++ }
+ }
+
+ static int offload_pedit_fields(struct mlx5e_priv *priv,
+@@ -3185,11 +3195,12 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
+ struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
+ struct pedit_headers_action *hdrs = parse_attr->hdrs;
+ void *headers_c, *headers_v, *action, *vals_p;
+- u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
+ struct mlx5e_tc_mod_hdr_acts *mod_acts;
+- unsigned long mask, field_mask;
++ void *s_masks_p, *a_masks_p;
+ int i, first, last, next_z;
+ struct mlx5_fields *f;
++ unsigned long mask;
++ u32 s_mask, a_mask;
+ u8 cmd;
+
+ mod_acts = &parse_attr->mod_hdr_acts;
+@@ -3205,15 +3216,11 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
+ bool skip;
+
+ f = &fields[i];
+- /* avoid seeing bits set from previous iterations */
+- s_mask = 0;
+- a_mask = 0;
+-
+ s_masks_p = (void *)set_masks + f->offset;
+ a_masks_p = (void *)add_masks + f->offset;
+
+- s_mask = *s_masks_p & f->field_mask;
+- a_mask = *a_masks_p & f->field_mask;
++ s_mask = mask_field_get(s_masks_p, f);
++ a_mask = mask_field_get(a_masks_p, f);
+
+ if (!s_mask && !a_mask) /* nothing to offload here */
+ continue;
+@@ -3240,22 +3247,20 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
+ match_mask, f->field_bsize))
+ skip = true;
+ /* clear to denote we consumed this field */
+- *s_masks_p &= ~f->field_mask;
++ mask_field_clear(s_masks_p, f);
+ } else {
+ cmd = MLX5_ACTION_TYPE_ADD;
+ mask = a_mask;
+ vals_p = (void *)add_vals + f->offset;
+ /* add 0 is no change */
+- if ((*(u32 *)vals_p & f->field_mask) == 0)
++ if (!mask_field_get(vals_p, f))
+ skip = true;
+ /* clear to denote we consumed this field */
+- *a_masks_p &= ~f->field_mask;
++ mask_field_clear(a_masks_p, f);
+ }
+ if (skip)
+ continue;
+
+- mask = mask_to_le(mask, f->field_bsize);
+-
+ first = find_first_bit(&mask, f->field_bsize);
+ next_z = find_next_zero_bit(&mask, f->field_bsize, first);
+ last = find_last_bit(&mask, f->field_bsize);
+@@ -3282,10 +3287,9 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
+ MLX5_SET(set_action_in, action, field, f->field);
+
+ if (cmd == MLX5_ACTION_TYPE_SET) {
++ unsigned long field_mask = f->field_mask;
+ int start;
+
+- field_mask = mask_to_le(f->field_mask, f->field_bsize);
+-
+ /* if field is bit sized it can start not from first bit */
+ start = find_first_bit(&field_mask, f->field_bsize);
+
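
Editor's note: the pedit rework above drops the old mask_to_le() byte-shuffling in favor of loading each mask at its declared width with the matching be*_to_cpu() helper, which is endian-safe and avoids reading past narrow fields. The dispatch in isolation:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Read a network-order field of 8/16/32 bits as a host-order u32. */
static u32 be_field_get(const void *p, int bits)
{
    switch (bits) {
    case 32:
        return be32_to_cpu(*(const __be32 *)p);
    case 16:
        return be16_to_cpu(*(const __be16 *)p);
    default:
        return *(const u8 *)p;
    }
}
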
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+index c7eb6b238c2ba..f0b506e562df3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+@@ -372,7 +372,7 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ const struct mlx5e_tx_attr *attr,
+ const struct mlx5e_tx_wqe_attr *wqe_attr, u8 num_dma,
+ struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg,
+- bool xmit_more)
++ struct mlx5_wqe_eth_seg *eseg, bool xmit_more)
+ {
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ bool send_doorbell;
+@@ -394,11 +394,16 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+
+ mlx5e_tx_check_stop(sq);
+
+- if (unlikely(sq->ptpsq)) {
++ if (unlikely(sq->ptpsq &&
++ (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) {
++ u8 metadata_index = be32_to_cpu(eseg->flow_table_metadata);
++
+ mlx5e_skb_cb_hwtstamp_init(skb);
+- mlx5e_skb_fifo_push(&sq->ptpsq->skb_fifo, skb);
++ mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb,
++ metadata_index);
++ mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index);
+ if (!netif_tx_queue_stopped(sq->txq) &&
+- !mlx5e_skb_fifo_has_room(&sq->ptpsq->skb_fifo)) {
++ mlx5e_ptpsq_metadata_freelist_empty(sq->ptpsq)) {
+ netif_tx_stop_queue(sq->txq);
+ sq->stats->stopped++;
+ }
+@@ -483,12 +488,15 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ if (unlikely(num_dma < 0))
+ goto err_drop;
+
+- mlx5e_txwqe_complete(sq, skb, attr, wqe_attr, num_dma, wi, cseg, xmit_more);
++ mlx5e_txwqe_complete(sq, skb, attr, wqe_attr, num_dma, wi, cseg, eseg, xmit_more);
+
+ return;
+
+ err_drop:
+ stats->dropped++;
++ if (unlikely(sq->ptpsq && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
++ mlx5e_ptp_metadata_fifo_push(&sq->ptpsq->metadata_freelist,
++ be32_to_cpu(eseg->flow_table_metadata));
+ dev_kfree_skb_any(skb);
+ mlx5e_tx_flush(sq);
+ }
+@@ -645,9 +653,9 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
+ static void mlx5e_cqe_ts_id_eseg(struct mlx5e_ptpsq *ptpsq, struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg)
+ {
+- if (ptpsq->ts_cqe_ctr_mask && unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+- eseg->flow_table_metadata = cpu_to_be32(ptpsq->skb_fifo_pc &
+- ptpsq->ts_cqe_ctr_mask);
++ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
++ eseg->flow_table_metadata =
++ cpu_to_be32(mlx5e_ptp_metadata_fifo_pop(&ptpsq->metadata_freelist));
+ }
+
+ static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
+@@ -766,7 +774,7 @@ void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq)
+ {
+ if (netif_tx_queue_stopped(sq->txq) &&
+ mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
+- mlx5e_ptpsq_fifo_has_room(sq) &&
++ !mlx5e_ptpsq_metadata_freelist_empty(sq->ptpsq) &&
+ !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
+ netif_tx_wake_queue(sq->txq);
+ sq->stats->wake++;
+@@ -1031,7 +1039,7 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ if (unlikely(num_dma < 0))
+ goto err_drop;
+
+- mlx5e_txwqe_complete(sq, skb, &attr, &wqe_attr, num_dma, wi, cseg, xmit_more);
++ mlx5e_txwqe_complete(sq, skb, &attr, &wqe_attr, num_dma, wi, cseg, eseg, xmit_more);
+
+ return;
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+index aa29f09e83564..0c83ef174275a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+@@ -384,7 +384,12 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+
+ static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta)
+ {
+- return mlx5_ptp_adjtime(ptp, delta);
++ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
++ struct mlx5_core_dev *mdev;
++
++ mdev = container_of(clock, struct mlx5_core_dev, clock);
++
++ return mlx5_ptp_adjtime_real_time(mdev, delta);
+ }
+
+ static int mlx5_ptp_freq_adj_real_time(struct mlx5_core_dev *mdev, long scaled_ppm)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+index cba2a4afb5fda..235e170c65bb7 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+@@ -28,7 +28,7 @@
+ struct mlx5_irq {
+ struct atomic_notifier_head nh;
+ cpumask_var_t mask;
+- char name[MLX5_MAX_IRQ_NAME];
++ char name[MLX5_MAX_IRQ_FORMATTED_NAME];
+ struct mlx5_irq_pool *pool;
+ int refcount;
+ struct msi_map map;
+@@ -289,8 +289,8 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
+ else
+ irq_sf_set_name(pool, name, i);
+ ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
+- snprintf(irq->name, MLX5_MAX_IRQ_NAME,
+- "%s@pci:%s", name, pci_name(dev->pdev));
++ snprintf(irq->name, MLX5_MAX_IRQ_FORMATTED_NAME,
++ MLX5_IRQ_NAME_FORMAT_STR, name, pci_name(dev->pdev));
+ err = request_irq(irq->map.virq, irq_int_handler, 0, irq->name,
+ &irq->nh);
+ if (err) {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
+index d3a77a0ab8488..c4d377f8df308 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
+@@ -7,6 +7,9 @@
+ #include <linux/mlx5/driver.h>
+
+ #define MLX5_MAX_IRQ_NAME (32)
++#define MLX5_IRQ_NAME_FORMAT_STR ("%s@pci:%s")
++#define MLX5_MAX_IRQ_FORMATTED_NAME \
++ (MLX5_MAX_IRQ_NAME + sizeof(MLX5_IRQ_NAME_FORMAT_STR))
+ /* max irq_index is 2047, so four chars */
+ #define MLX5_MAX_IRQ_IDX_CHARS (4)
+ #define MLX5_EQ_REFS_PER_IRQ (2)
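
Editor's note: MLX5_MAX_IRQ_FORMATTED_NAME above budgets the formatted string as the name cap plus `sizeof()` of the format literal; since sizeof a string literal counts its terminating NUL, the fixed characters of the format and the trailing '\0' are covered in one term. The same sizing idiom reduced to essentials (names hypothetical):

#define FMT_STR		"%s@pci:%s"
#define NAME_MAX_LEN	(32)			/* cap assumed for the "%s" expansions */
#define FORMATTED_MAX	(NAME_MAX_LEN + sizeof(FMT_STR))	/* literal chars + NUL */

static char irq_name[FORMATTED_MAX];
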
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 0c76c162b8a9f..295366a85c630 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -624,6 +624,7 @@ struct rtl8169_private {
+
+ unsigned supports_gmii:1;
+ unsigned aspm_manageable:1;
++ unsigned dash_enabled:1;
+ dma_addr_t counters_phys_addr;
+ struct rtl8169_counters *counters;
+ struct rtl8169_tc_offsets tc_offset;
+@@ -1253,14 +1254,26 @@ static bool r8168ep_check_dash(struct rtl8169_private *tp)
+ return r8168ep_ocp_read(tp, 0x128) & BIT(0);
+ }
+
+-static enum rtl_dash_type rtl_check_dash(struct rtl8169_private *tp)
++static bool rtl_dash_is_enabled(struct rtl8169_private *tp)
++{
++ switch (tp->dash_type) {
++ case RTL_DASH_DP:
++ return r8168dp_check_dash(tp);
++ case RTL_DASH_EP:
++ return r8168ep_check_dash(tp);
++ default:
++ return false;
++ }
++}
++
++static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp)
+ {
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+- return r8168dp_check_dash(tp) ? RTL_DASH_DP : RTL_DASH_NONE;
++ return RTL_DASH_DP;
+ case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53:
+- return r8168ep_check_dash(tp) ? RTL_DASH_EP : RTL_DASH_NONE;
++ return RTL_DASH_EP;
+ default:
+ return RTL_DASH_NONE;
+ }
+@@ -1453,7 +1466,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
+
+ device_set_wakeup_enable(tp_to_dev(tp), wolopts);
+
+- if (tp->dash_type == RTL_DASH_NONE) {
++ if (!tp->dash_enabled) {
+ rtl_set_d3_pll_down(tp, !wolopts);
+ tp->dev->wol_enabled = wolopts ? 1 : 0;
+ }
+@@ -2512,7 +2525,7 @@ static void rtl_wol_enable_rx(struct rtl8169_private *tp)
+
+ static void rtl_prepare_power_down(struct rtl8169_private *tp)
+ {
+- if (tp->dash_type != RTL_DASH_NONE)
++ if (tp->dash_enabled)
+ return;
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
+@@ -2586,9 +2599,7 @@ static void rtl_set_rx_mode(struct net_device *dev)
+ rx_mode &= ~AcceptMulticast;
+ } else if (netdev_mc_count(dev) > MC_FILTER_LIMIT ||
+ dev->flags & IFF_ALLMULTI ||
+- tp->mac_version == RTL_GIGA_MAC_VER_35 ||
+- tp->mac_version == RTL_GIGA_MAC_VER_46 ||
+- tp->mac_version == RTL_GIGA_MAC_VER_48) {
++ tp->mac_version == RTL_GIGA_MAC_VER_35) {
+ /* accept all multicasts */
+ } else if (netdev_mc_empty(dev)) {
+ rx_mode &= ~AcceptMulticast;
+@@ -4648,10 +4659,16 @@ static void rtl8169_down(struct rtl8169_private *tp)
+ rtl8169_cleanup(tp);
+ rtl_disable_exit_l1(tp);
+ rtl_prepare_power_down(tp);
++
++ if (tp->dash_type != RTL_DASH_NONE)
++ rtl8168_driver_stop(tp);
+ }
+
+ static void rtl8169_up(struct rtl8169_private *tp)
+ {
++ if (tp->dash_type != RTL_DASH_NONE)
++ rtl8168_driver_start(tp);
++
+ pci_set_master(tp->pci_dev);
+ phy_init_hw(tp->phydev);
+ phy_resume(tp->phydev);
+@@ -4869,7 +4886,7 @@ static int rtl8169_runtime_idle(struct device *device)
+ {
+ struct rtl8169_private *tp = dev_get_drvdata(device);
+
+- if (tp->dash_type != RTL_DASH_NONE)
++ if (tp->dash_enabled)
+ return -EBUSY;
+
+ if (!netif_running(tp->dev) || !netif_carrier_ok(tp->dev))
+@@ -4895,8 +4912,7 @@ static void rtl_shutdown(struct pci_dev *pdev)
+ /* Restore original MAC address */
+ rtl_rar_set(tp, tp->dev->perm_addr);
+
+- if (system_state == SYSTEM_POWER_OFF &&
+- tp->dash_type == RTL_DASH_NONE) {
++ if (system_state == SYSTEM_POWER_OFF && !tp->dash_enabled) {
+ pci_wake_from_d3(pdev, tp->saved_wolopts);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+@@ -5254,7 +5270,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
+ tp->aspm_manageable = !rc;
+
+- tp->dash_type = rtl_check_dash(tp);
++ tp->dash_type = rtl_get_dash_type(tp);
++ tp->dash_enabled = rtl_dash_is_enabled(tp);
+
+ tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK;
+
+@@ -5325,7 +5342,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ /* configure chip for default features */
+ rtl8169_set_features(dev, dev->features);
+
+- if (tp->dash_type == RTL_DASH_NONE) {
++ if (!tp->dash_enabled) {
+ rtl_set_d3_pll_down(tp, true);
+ } else {
+ rtl_set_d3_pll_down(tp, false);
+@@ -5365,7 +5382,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ "ok" : "ko");
+
+ if (tp->dash_type != RTL_DASH_NONE) {
+- netdev_info(dev, "DASH enabled\n");
++ netdev_info(dev, "DASH %s\n",
++ tp->dash_enabled ? "enabled" : "disabled");
+ rtl8168_driver_start(tp);
+ }
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index e840cadb2d75a..86ff015fba354 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -5223,6 +5223,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
+
+ dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
+ buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
++ limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
+
+ if (netif_msg_rx_status(priv)) {
+ void *rx_head;
+@@ -5258,10 +5259,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
+ len = 0;
+ }
+
++read_again:
+ if (count >= limit)
+ break;
+
+-read_again:
+ buf1_len = 0;
+ buf2_len = 0;
+ entry = next_entry;
+diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
+index 21e9cac731218..2d5b021b4ea60 100644
+--- a/drivers/net/ipvlan/ipvlan_core.c
++++ b/drivers/net/ipvlan/ipvlan_core.c
+@@ -411,7 +411,7 @@ struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
+ return addr;
+ }
+
+-static int ipvlan_process_v4_outbound(struct sk_buff *skb)
++static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
+ {
+ const struct iphdr *ip4h = ip_hdr(skb);
+ struct net_device *dev = skb->dev;
+@@ -453,13 +453,11 @@ out:
+ }
+
+ #if IS_ENABLED(CONFIG_IPV6)
+-static int ipvlan_process_v6_outbound(struct sk_buff *skb)
++
++static noinline_for_stack int
++ipvlan_route_v6_outbound(struct net_device *dev, struct sk_buff *skb)
+ {
+ const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+- struct net_device *dev = skb->dev;
+- struct net *net = dev_net(dev);
+- struct dst_entry *dst;
+- int err, ret = NET_XMIT_DROP;
+ struct flowi6 fl6 = {
+ .flowi6_oif = dev->ifindex,
+ .daddr = ip6h->daddr,
+@@ -469,27 +467,38 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
+ .flowi6_mark = skb->mark,
+ .flowi6_proto = ip6h->nexthdr,
+ };
++ struct dst_entry *dst;
++ int err;
+
+- dst = ip6_route_output(net, NULL, &fl6);
+- if (dst->error) {
+- ret = dst->error;
++ dst = ip6_route_output(dev_net(dev), NULL, &fl6);
++ err = dst->error;
++ if (err) {
+ dst_release(dst);
+- goto err;
++ return err;
+ }
+ skb_dst_set(skb, dst);
++ return 0;
++}
++
++static int ipvlan_process_v6_outbound(struct sk_buff *skb)
++{
++ struct net_device *dev = skb->dev;
++ int err, ret = NET_XMIT_DROP;
++
++ err = ipvlan_route_v6_outbound(dev, skb);
++ if (unlikely(err)) {
++ DEV_STATS_INC(dev, tx_errors);
++ kfree_skb(skb);
++ return err;
++ }
+
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+
+- err = ip6_local_out(net, skb->sk, skb);
++ err = ip6_local_out(dev_net(dev), skb->sk, skb);
+ if (unlikely(net_xmit_eval(err)))
+ DEV_STATS_INC(dev, tx_errors);
+ else
+ ret = NET_XMIT_SUCCESS;
+- goto out;
+-err:
+- DEV_STATS_INC(dev, tx_errors);
+- kfree_skb(skb);
+-out:
+ return ret;
+ }
+ #else
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index ed908165a8b4e..347f288350619 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -780,7 +780,7 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
+ if (dev->flags & IFF_UP) {
+ if (change & IFF_ALLMULTI)
+ dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+- if (change & IFF_PROMISC)
++ if (!macvlan_passthru(vlan->port) && change & IFF_PROMISC)
+ dev_set_promiscuity(lowerdev,
+ dev->flags & IFF_PROMISC ? 1 : -1);
+
+diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
+index d0aaa5cad8533..24ae13ea03b0b 100644
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -1568,6 +1568,7 @@ struct phylink *phylink_create(struct phylink_config *config,
+ pl->config = config;
+ if (config->type == PHYLINK_NETDEV) {
+ pl->netdev = to_net_dev(config->dev);
++ netif_carrier_off(pl->netdev);
+ } else if (config->type == PHYLINK_DEV) {
+ pl->dev = config->dev;
+ } else {
+diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
+index d855a18308d78..f411ded5344a8 100644
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -452,6 +452,11 @@ static const struct sfp_quirk sfp_quirks[] = {
+ // Rollball protocol to talk to the PHY.
+ SFP_QUIRK_F("FS", "SFP-10G-T", sfp_fixup_fs_10gt),
+
++ // Fiberstore GPON-ONU-34-20BI can operate at 2500base-X, but reports 1.2GBd
++ // NRZ in its EEPROM
++ SFP_QUIRK("FS", "GPON-ONU-34-20BI", sfp_quirk_2500basex,
++ sfp_fixup_ignore_tx_fault),
++
+ SFP_QUIRK_F("HALNy", "HL-GSFP", sfp_fixup_halny_gsfp),
+
+ // HG MXPD-483II-F 2.5G supports 2500Base-X, but incorrectly reports
+@@ -463,6 +468,9 @@ static const struct sfp_quirk sfp_quirks[] = {
+ SFP_QUIRK("HUAWEI", "MA5671A", sfp_quirk_2500basex,
+ sfp_fixup_ignore_tx_fault),
+
++ // FS 2.5G Base-T
++ SFP_QUIRK_M("FS", "SFP-2.5G-T", sfp_quirk_oem_2_5g),
++
+ // Lantech 8330-262D-E can operate at 2500base-X, but incorrectly report
+ // 2500MBd NRZ in their EEPROM
+ SFP_QUIRK_M("Lantech", "8330-262D-E", sfp_quirk_2500basex),
+diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
+index 18283b7b94bcd..94ef6f9ca5103 100644
+--- a/drivers/net/ppp/ppp_synctty.c
++++ b/drivers/net/ppp/ppp_synctty.c
+@@ -462,6 +462,10 @@ ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
+ case PPPIOCSMRU:
+ if (get_user(val, (int __user *) argp))
+ break;
++ if (val > U16_MAX) {
++ err = -EINVAL;
++ break;
++ }
+ if (val < PPP_MRU)
+ val = PPP_MRU;
+ ap->mru = val;
+@@ -697,7 +701,7 @@ ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
+
+ /* strip address/control field if present */
+ p = skb->data;
+- if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
++ if (skb->len >= 2 && p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
+ /* chop off address/control */
+ if (skb->len < 3)
+ goto err;
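
Editor's note: the ppp_synctty fixes are two input-validation guards: clamp PPPIOCSMRU to what a u16 field can hold, and never touch p[0]/p[1] unless the skb actually carries two bytes. The dereference guard as a one-line helper:

#include <linux/ppp_defs.h>	/* PPP_ALLSTATIONS, PPP_UI */
#include <linux/skbuff.h>

/* Check the length covers every byte before dereferencing it. */
static bool ppp_has_ac_field(const struct sk_buff *skb)
{
    const u8 *p = skb->data;

    return skb->len >= 2 && p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI;
}
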
+diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
+index f9518e1c99039..fe89bc61e5317 100644
+--- a/drivers/net/wireless/ath/ath10k/debug.c
++++ b/drivers/net/wireless/ath/ath10k/debug.c
+@@ -1140,7 +1140,7 @@ void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
+ u32 sset, u8 *data)
+ {
+ if (sset == ETH_SS_STATS)
+- memcpy(data, *ath10k_gstrings_stats,
++ memcpy(data, ath10k_gstrings_stats,
+ sizeof(ath10k_gstrings_stats));
+ }
+
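
Editor's note: the ath10k one-liner matters because `ath10k_gstrings_stats` is a two-dimensional char array: `*ath10k_gstrings_stats` decays to just its first row, so copying `sizeof(ath10k_gstrings_stats)` bytes through it is an out-of-bounds access of that row's type (same address, wrong bounds) that FORTIFY_SOURCE flags. Passing the array itself keeps type and size in agreement:

#include <linux/string.h>
#include <linux/types.h>

#define GSTR_LEN 32

static const char stats_strings[][GSTR_LEN] = {
    "rx_packets",
    "tx_packets",
};

static void fill_strings(u8 *data)
{
    /* sizeof(stats_strings) spans all rows; the source expression
     * must span them too, so pass the array, not *stats_strings. */
    memcpy(data, stats_strings, sizeof(stats_strings));
}
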
+diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
+index 26214c00cd0d7..2c39bad7ebfb9 100644
+--- a/drivers/net/wireless/ath/ath10k/snoc.c
++++ b/drivers/net/wireless/ath/ath10k/snoc.c
+@@ -828,12 +828,20 @@ static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar,
+
+ static inline void ath10k_snoc_irq_disable(struct ath10k *ar)
+ {
+- ath10k_ce_disable_interrupts(ar);
++ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
++ int id;
++
++ for (id = 0; id < CE_COUNT_MAX; id++)
++ disable_irq(ar_snoc->ce_irqs[id].irq_line);
+ }
+
+ static inline void ath10k_snoc_irq_enable(struct ath10k *ar)
+ {
+- ath10k_ce_enable_interrupts(ar);
++ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
++ int id;
++
++ for (id = 0; id < CE_COUNT_MAX; id++)
++ enable_irq(ar_snoc->ce_irqs[id].irq_line);
+ }
+
+ static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
+@@ -1090,6 +1098,8 @@ static int ath10k_snoc_hif_power_up(struct ath10k *ar,
+ goto err_free_rri;
+ }
+
++ ath10k_ce_enable_interrupts(ar);
++
+ return 0;
+
+ err_free_rri:
+@@ -1253,8 +1263,8 @@ static int ath10k_snoc_request_irq(struct ath10k *ar)
+
+ for (id = 0; id < CE_COUNT_MAX; id++) {
+ ret = request_irq(ar_snoc->ce_irqs[id].irq_line,
+- ath10k_snoc_per_engine_handler, 0,
+- ce_name[id], ar);
++ ath10k_snoc_per_engine_handler,
++ IRQF_NO_AUTOEN, ce_name[id], ar);
+ if (ret) {
+ ath10k_err(ar,
+ "failed to register IRQ handler for CE %d: %d\n",
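
Editor's note: requesting the CE interrupts with IRQF_NO_AUTOEN keeps each line masked at request time, so the driver's later enable_irq()/disable_irq() calls stay balanced instead of double-enabling a line the core already unmasked. The request-side idiom, reduced:

#include <linux/interrupt.h>

static int my_request_ce_irq(int irq, irq_handler_t handler, void *arg)
{
    /* the line stays disabled until an explicit enable_irq(irq) */
    return request_irq(irq, handler, IRQF_NO_AUTOEN, "my-ce", arg);
}
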
+diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
+index 1e488eed282b5..8ed7d3b7f049f 100644
+--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
++++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
+@@ -1621,14 +1621,20 @@ static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
+ u8 pdev_id;
+
+ pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
++
++ rcu_read_lock();
++
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
+- return;
++ goto out;
+ }
+
+ trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
+ ar->ab->pktlog_defs_checksum);
++
++out:
++ rcu_read_unlock();
+ }
+
+ static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
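
The dp_rx.c hunk keeps the pdev lookup and every use of the returned ar inside one RCU read-side critical section, since a concurrent updater may free the object as soon as the section ends. A generic sketch of the idiom (table and object types invented):

#include <linux/rcupdate.h>

struct obj {
        int val;
};

struct table {
        struct obj __rcu *slots[16];
};

static int example_read(struct table *tbl, unsigned int idx)
{
        struct obj *o;
        int val = -1;

        rcu_read_lock();
        o = rcu_dereference(tbl->slots[idx]);
        if (o)
                val = o->val;   /* all uses stay inside the section */
        rcu_read_unlock();

        return val;
}
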
+diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
+index 23ad6825e5be5..1c07f55c25e67 100644
+--- a/drivers/net/wireless/ath/ath11k/wmi.c
++++ b/drivers/net/wireless/ath/ath11k/wmi.c
+@@ -8337,6 +8337,8 @@ ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff
+ ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
+ ev->freq_offset, ev->sidx);
+
++ rcu_read_lock();
++
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
+
+ if (!ar) {
+@@ -8354,6 +8356,8 @@ ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff
+ ieee80211_radar_detected(ar->hw);
+
+ exit:
++ rcu_read_unlock();
++
+ kfree(tb);
+ }
+
+@@ -8383,15 +8387,19 @@ ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event pdev temperature ev temp %d pdev_id %d\n",
+ ev->temp, ev->pdev_id);
+
++ rcu_read_lock();
++
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev->pdev_id);
+- kfree(tb);
+- return;
++ goto exit;
+ }
+
+ ath11k_thermal_event_temperature(ar, ev->temp);
+
++exit:
++ rcu_read_unlock();
++
+ kfree(tb);
+ }
+
+@@ -8611,12 +8619,13 @@ static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab,
+ return;
+ }
+
++ rcu_read_lock();
++
+ arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id);
+ if (!arvif) {
+ ath11k_warn(ab, "failed to get arvif for vdev_id:%d\n",
+ ev->vdev_id);
+- kfree(tb);
+- return;
++ goto exit;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event gtk offload refresh_cnt %d\n",
+@@ -8633,6 +8642,8 @@ static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab,
+
+ ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid,
+ (void *)&replay_ctr_be, GFP_ATOMIC);
++exit:
++ rcu_read_unlock();
+
+ kfree(tb);
+ }
+diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
+index f933896f2a68d..6893466f61f04 100644
+--- a/drivers/net/wireless/ath/ath12k/dp.c
++++ b/drivers/net/wireless/ath/ath12k/dp.c
+@@ -38,6 +38,7 @@ void ath12k_dp_peer_cleanup(struct ath12k *ar, int vdev_id, const u8 *addr)
+
+ ath12k_dp_rx_peer_tid_cleanup(ar, peer);
+ crypto_free_shash(peer->tfm_mmic);
++ peer->dp_setup_done = false;
+ spin_unlock_bh(&ab->base_lock);
+ }
+
+diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
+index fcb91b8ef00e3..71d12c28b3a76 100644
+--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
++++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
+@@ -1555,6 +1555,13 @@ static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab,
+
+ msg = (struct ath12k_htt_ppdu_stats_msg *)skb->data;
+ len = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE);
++ if (len > (skb->len - struct_size(msg, data, 0))) {
++ ath12k_warn(ab,
++ "HTT PPDU STATS event has unexpected payload size %u, should be smaller than %u\n",
++ len, skb->len);
++ return -EINVAL;
++ }
++
+ pdev_id = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PDEV_ID);
+ ppdu_id = le32_to_cpu(msg->ppdu_id);
+
+@@ -1583,6 +1590,16 @@ static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab,
+ goto exit;
+ }
+
++ if (ppdu_info->ppdu_stats.common.num_users >= HTT_PPDU_STATS_MAX_USERS) {
++ spin_unlock_bh(&ar->data_lock);
++ ath12k_warn(ab,
++ "HTT PPDU STATS event has unexpected num_users %u, should be smaller than %u\n",
++ ppdu_info->ppdu_stats.common.num_users,
++ HTT_PPDU_STATS_MAX_USERS);
++ ret = -EINVAL;
++ goto exit;
++ }
++
+ /* back up data rate tlv for all peers */
+ if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_DATA &&
+ (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON)) &&
+@@ -1641,11 +1658,12 @@ static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
+ msg = (struct ath12k_htt_mlo_offset_msg *)skb->data;
+ pdev_id = u32_get_bits(__le32_to_cpu(msg->info),
+ HTT_T2H_MLO_OFFSET_INFO_PDEV_ID);
+- ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
+
++ rcu_read_lock();
++ ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ if (!ar) {
+ ath12k_warn(ab, "invalid pdev id %d on htt mlo offset\n", pdev_id);
+- return;
++ goto exit;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+@@ -1661,6 +1679,8 @@ static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
+ pdev->timestamp.mlo_comp_timer = __le32_to_cpu(msg->mlo_comp_timer);
+
+ spin_unlock_bh(&ar->data_lock);
++exit:
++ rcu_read_unlock();
+ }
+
+ void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
+@@ -2747,6 +2767,7 @@ int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev
+ }
+
+ peer->tfm_mmic = tfm;
++ peer->dp_setup_done = true;
+ spin_unlock_bh(&ab->base_lock);
+
+ return 0;
+@@ -3213,6 +3234,14 @@ static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar,
+ ret = -ENOENT;
+ goto out_unlock;
+ }
++
++ if (!peer->dp_setup_done) {
++ ath12k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n",
++ peer->addr, peer_id);
++ ret = -ENOENT;
++ goto out_unlock;
++ }
++
+ rx_tid = &peer->rx_tid[tid];
+
+ if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
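
Both new ath12k dp_rx.c checks reject firmware-advertised counts (payload length, num_users) that exceed what was actually received or what the destination arrays hold. A userspace sketch of the same validation against an invented header layout:

#include <stdint.h>
#include <string.h>

struct msg_hdr {
        uint32_t len;           /* advertised payload bytes */
        uint32_t nusers;        /* advertised entry count */
};

#define MAX_USERS 8             /* illustrative array capacity */

static int parse_msg(const uint8_t *buf, size_t buflen)
{
        struct msg_hdr h;

        if (buflen < sizeof(h))
                return -1;
        memcpy(&h, buf, sizeof(h));

        /* Never trust a length field on its own: check it against the
         * bytes we really have and the arrays we really allocated. */
        if (h.len > buflen - sizeof(h))
                return -1;
        if (h.nusers > MAX_USERS)
                return -1;

        /* ... safe to walk h.len bytes / h.nusers entries ... */
        return 0;
}
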
+diff --git a/drivers/net/wireless/ath/ath12k/mhi.c b/drivers/net/wireless/ath/ath12k/mhi.c
+index 42f1140baa4fe..f83d3e09ae366 100644
+--- a/drivers/net/wireless/ath/ath12k/mhi.c
++++ b/drivers/net/wireless/ath/ath12k/mhi.c
+@@ -370,8 +370,7 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci)
+ ret = ath12k_mhi_get_msi(ab_pci);
+ if (ret) {
+ ath12k_err(ab, "failed to get msi for mhi\n");
+- mhi_free_controller(mhi_ctrl);
+- return ret;
++ goto free_controller;
+ }
+
+ mhi_ctrl->iova_start = 0;
+@@ -388,11 +387,15 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci)
+ ret = mhi_register_controller(mhi_ctrl, ab->hw_params->mhi_config);
+ if (ret) {
+ ath12k_err(ab, "failed to register to mhi bus, err = %d\n", ret);
+- mhi_free_controller(mhi_ctrl);
+- return ret;
++ goto free_controller;
+ }
+
+ return 0;
++
++free_controller:
++ mhi_free_controller(mhi_ctrl);
++ ab_pci->mhi_ctrl = NULL;
++ return ret;
+ }
+
+ void ath12k_mhi_unregister(struct ath12k_pci *ab_pci)
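
The mhi.c hunk folds two duplicated failure paths into one unwind label and also clears the cached controller pointer, so a later unregister cannot act on a freed controller. A sketch of that goto-unwind shape (all names hypothetical):

#include <linux/errno.h>

struct example_ctx {
        void *res;
};

/* Hypothetical helpers standing in for the real MHI calls. */
void *alloc_resource(void);
int step_one(struct example_ctx *c);
int step_two(struct example_ctx *c);
void free_resource(void *res);

static int example_register(struct example_ctx *c)
{
        int ret;

        c->res = alloc_resource();
        if (!c->res)
                return -ENOMEM;

        ret = step_one(c);
        if (ret)
                goto free_res;

        ret = step_two(c);
        if (ret)
                goto free_res;

        return 0;

free_res:
        free_resource(c->res);  /* one cleanup site for every failure */
        c->res = NULL;          /* no dangling pointer for teardown */
        return ret;
}
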
+diff --git a/drivers/net/wireless/ath/ath12k/peer.h b/drivers/net/wireless/ath/ath12k/peer.h
+index b296dc0e2f671..c6edb24cbedd8 100644
+--- a/drivers/net/wireless/ath/ath12k/peer.h
++++ b/drivers/net/wireless/ath/ath12k/peer.h
+@@ -44,6 +44,9 @@ struct ath12k_peer {
+ struct ppdu_user_delayba ppdu_stats_delayba;
+ bool delayba_flag;
+ bool is_authorized;
++
++ /* protected by ab->data_lock */
++ bool dp_setup_done;
+ };
+
+ void ath12k_peer_unmap_event(struct ath12k_base *ab, u16 peer_id);
+diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
+index eebc5a65ce3b4..491d23ddabf06 100644
+--- a/drivers/net/wireless/ath/ath12k/wmi.c
++++ b/drivers/net/wireless/ath/ath12k/wmi.c
+@@ -3799,6 +3799,12 @@ static int ath12k_wmi_ext_hal_reg_caps(struct ath12k_base *soc,
+ ath12k_warn(soc, "failed to extract reg cap %d\n", i);
+ return ret;
+ }
++
++ if (reg_cap.phy_id >= MAX_RADIOS) {
++ ath12k_warn(soc, "unexpected phy id %u\n", reg_cap.phy_id);
++ return -EINVAL;
++ }
++
+ soc->hal_reg_cap[reg_cap.phy_id] = reg_cap;
+ }
+ return 0;
+@@ -6228,6 +6234,8 @@ ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff
+ ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
+ ev->freq_offset, ev->sidx);
+
++ rcu_read_lock();
++
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id));
+
+ if (!ar) {
+@@ -6245,6 +6253,8 @@ ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff
+ ieee80211_radar_detected(ar->hw);
+
+ exit:
++ rcu_read_unlock();
++
+ kfree(tb);
+ }
+
+@@ -6263,11 +6273,16 @@ ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id);
+
++ rcu_read_lock();
++
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev.pdev_id));
+ if (!ar) {
+ ath12k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id);
+- return;
++ goto exit;
+ }
++
++exit:
++ rcu_read_unlock();
+ }
+
+ static void ath12k_fils_discovery_event(struct ath12k_base *ab,
+diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
+index fb7a2952d0ce8..d9bac1c343490 100644
+--- a/drivers/net/wireless/ath/ath9k/debug.c
++++ b/drivers/net/wireless/ath/ath9k/debug.c
+@@ -1333,7 +1333,7 @@ void ath9k_get_et_strings(struct ieee80211_hw *hw,
+ u32 sset, u8 *data)
+ {
+ if (sset == ETH_SS_STATS)
+- memcpy(data, *ath9k_gstrings_stats,
++ memcpy(data, ath9k_gstrings_stats,
+ sizeof(ath9k_gstrings_stats));
+ }
+
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+index c55aab01fff5d..e79bbcd3279af 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+@@ -428,7 +428,7 @@ void ath9k_htc_get_et_strings(struct ieee80211_hw *hw,
+ u32 sset, u8 *data)
+ {
+ if (sset == ETH_SS_STATS)
+- memcpy(data, *ath9k_htc_gstrings_stats,
++ memcpy(data, ath9k_htc_gstrings_stats,
+ sizeof(ath9k_htc_gstrings_stats));
+ }
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
+index 6e1ad65527d12..4ab55a1fcbf04 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
+@@ -60,7 +60,7 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ if (link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID) {
+ link_info->fw_link_id = iwl_mvm_get_free_fw_link_id(mvm,
+ mvmvif);
+- if (link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID)
++ if (link_info->fw_link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf))
+ return -EINVAL;
+
+ rcu_assign_pointer(mvm->link_id_to_link_conf[link_info->fw_link_id],
+@@ -243,7 +243,7 @@ int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ int ret;
+
+ if (WARN_ON(!link_info ||
+- link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID))
++ link_info->fw_link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf)))
+ return -EINVAL;
+
+ RCU_INIT_POINTER(mvm->link_id_to_link_conf[link_info->fw_link_id],
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+index 2ede69132fee9..177a4628a913e 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+@@ -536,16 +536,20 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
+ flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
+
+ /*
+- * For data packets rate info comes from the fw. Only
+- * set rate/antenna during connection establishment or in case
+- * no station is given.
++ * For data and mgmt packets rate info comes from the fw. Only
++ * set rate/antenna for injected frames with fixed rate, or
++ * when no sta is given.
+ */
+- if (!sta || !ieee80211_is_data(hdr->frame_control) ||
+- mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) {
++ if (unlikely(!sta ||
++ info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) {
+ flags |= IWL_TX_FLAGS_CMD_RATE;
+ rate_n_flags =
+ iwl_mvm_get_tx_rate_n_flags(mvm, info, sta,
+ hdr->frame_control);
++ } else if (!ieee80211_is_data(hdr->frame_control) ||
++ mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) {
++ /* These are important frames */
++ flags |= IWL_TX_FLAGS_HIGH_PRI;
+ }
+
+ if (mvm->trans->trans_cfg->device_family >=
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+index 95610a117d2f0..ed5a220763ce6 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+@@ -17,6 +17,8 @@ static const struct pci_device_id mt7921_pci_device_table[] = {
+ .driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7922),
+ .driver_data = (kernel_ulong_t)MT7922_FIRMWARE_WM },
++ { PCI_DEVICE(PCI_VENDOR_ID_ITTIM, 0x7922),
++ .driver_data = (kernel_ulong_t)MT7922_FIRMWARE_WM },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0608),
+ .driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0616),
+diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c
+index 58bbf50081e47..9eb115c79c90a 100644
+--- a/drivers/net/wireless/microchip/wilc1000/wlan.c
++++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
+@@ -1492,7 +1492,7 @@ int wilc_wlan_init(struct net_device *dev)
+ }
+
+ if (!wilc->vmm_table)
+- wilc->vmm_table = kzalloc(WILC_VMM_TBL_SIZE, GFP_KERNEL);
++ wilc->vmm_table = kcalloc(WILC_VMM_TBL_SIZE, sizeof(u32), GFP_KERNEL);
+
+ if (!wilc->vmm_table) {
+ ret = -ENOBUFS;
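
The wilc1000 fix is a unit bug: kzalloc(WILC_VMM_TBL_SIZE) allocated that many bytes, while the table holds that many u32 entries; kcalloc() takes the element count and element size separately and overflow-checks the multiplication. The userspace equivalent:

#include <stdint.h>
#include <stdlib.h>

#define TBL_ENTRIES 64                  /* illustrative */

int main(void)
{
        /* Bug pattern: TBL_ENTRIES bytes, a quarter of what a table
         * of TBL_ENTRIES uint32_t entries actually needs. */
        uint32_t *bad = malloc(TBL_ENTRIES);

        /* Fix pattern: count and element size kept separate, with the
         * multiplication checked for overflow by the allocator. */
        uint32_t *good = calloc(TBL_ENTRIES, sizeof(*good));

        free(bad);
        free(good);
        return 0;
}
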
+diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.c b/drivers/net/wireless/purelifi/plfxlc/mac.c
+index 94ee831b5de35..506d2f31efb5a 100644
+--- a/drivers/net/wireless/purelifi/plfxlc/mac.c
++++ b/drivers/net/wireless/purelifi/plfxlc/mac.c
+@@ -666,7 +666,7 @@ static void plfxlc_get_et_strings(struct ieee80211_hw *hw,
+ u32 sset, u8 *data)
+ {
+ if (sset == ETH_SS_STATS)
+- memcpy(data, *et_strings, sizeof(et_strings));
++ memcpy(data, et_strings, sizeof(et_strings));
+ }
+
+ static void plfxlc_get_et_stats(struct ieee80211_hw *hw,
+diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
+index 23307c8baea21..6dc153a267872 100644
+--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
++++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
+@@ -3170,7 +3170,7 @@ static void mac80211_hwsim_get_et_strings(struct ieee80211_hw *hw,
+ u32 sset, u8 *data)
+ {
+ if (sset == ETH_SS_STATS)
+- memcpy(data, *mac80211_hwsim_gstrings_stats,
++ memcpy(data, mac80211_hwsim_gstrings_stats,
+ sizeof(mac80211_hwsim_gstrings_stats));
+ }
+
+diff --git a/drivers/of/address.c b/drivers/of/address.c
+index e692809ff8227..3219c51777507 100644
+--- a/drivers/of/address.c
++++ b/drivers/of/address.c
+@@ -100,6 +100,32 @@ static unsigned int of_bus_default_get_flags(const __be32 *addr)
+ return IORESOURCE_MEM;
+ }
+
++static u64 of_bus_default_flags_map(__be32 *addr, const __be32 *range, int na,
++ int ns, int pna)
++{
++ u64 cp, s, da;
++
++ /* Check that flags match */
++ if (*addr != *range)
++ return OF_BAD_ADDR;
++
++ /* Read address values, skipping high cell */
++ cp = of_read_number(range + 1, na - 1);
++ s = of_read_number(range + na + pna, ns);
++ da = of_read_number(addr + 1, na - 1);
++
++ pr_debug("default flags map, cp=%llx, s=%llx, da=%llx\n", cp, s, da);
++
++ if (da < cp || da >= (cp + s))
++ return OF_BAD_ADDR;
++ return da - cp;
++}
++
++static int of_bus_default_flags_translate(__be32 *addr, u64 offset, int na)
++{
++ /* Keep "flags" part (high cell) in translated address */
++ return of_bus_default_translate(addr + 1, offset, na - 1);
++}
+
+ #ifdef CONFIG_PCI
+ static unsigned int of_bus_pci_get_flags(const __be32 *addr)
+@@ -374,8 +400,8 @@ static struct of_bus of_busses[] = {
+ .addresses = "reg",
+ .match = of_bus_default_flags_match,
+ .count_cells = of_bus_default_count_cells,
+- .map = of_bus_default_map,
+- .translate = of_bus_default_translate,
++ .map = of_bus_default_flags_map,
++ .translate = of_bus_default_flags_translate,
+ .has_flags = true,
+ .get_flags = of_bus_default_flags_get_flags,
+ },
+diff --git a/drivers/parisc/power.c b/drivers/parisc/power.c
+index 6f5e5f0230d39..332bcc0053a5e 100644
+--- a/drivers/parisc/power.c
++++ b/drivers/parisc/power.c
+@@ -197,6 +197,14 @@ static struct notifier_block parisc_panic_block = {
+ .priority = INT_MAX,
+ };
+
++/* qemu soft power-off function */
++static int qemu_power_off(struct sys_off_data *data)
++{
++ /* this turns the system off via SeaBIOS */
++ gsc_writel(0, (unsigned long) data->cb_data);
++ pdc_soft_power_button(1);
++ return NOTIFY_DONE;
++}
+
+ static int __init power_init(void)
+ {
+@@ -226,7 +234,13 @@ static int __init power_init(void)
+ soft_power_reg);
+ }
+
+- power_task = kthread_run(kpowerswd, (void*)soft_power_reg, KTHREAD_NAME);
++ power_task = NULL;
++ if (running_on_qemu && soft_power_reg)
++ register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_DEFAULT,
++ qemu_power_off, (void *)soft_power_reg);
++ else
++ power_task = kthread_run(kpowerswd, (void*)soft_power_reg,
++ KTHREAD_NAME);
+ if (IS_ERR(power_task)) {
+ printk(KERN_ERR DRIVER_NAME ": thread creation failed. Driver not loaded.\n");
+ pdc_soft_power_button(0);
+diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
+index ec56110055665..e5519978ba475 100644
+--- a/drivers/pci/controller/dwc/pci-exynos.c
++++ b/drivers/pci/controller/dwc/pci-exynos.c
+@@ -375,7 +375,7 @@ fail_probe:
+ return ret;
+ }
+
+-static int __exit exynos_pcie_remove(struct platform_device *pdev)
++static int exynos_pcie_remove(struct platform_device *pdev)
+ {
+ struct exynos_pcie *ep = platform_get_drvdata(pdev);
+
+@@ -431,7 +431,7 @@ static const struct of_device_id exynos_pcie_of_match[] = {
+
+ static struct platform_driver exynos_pcie_driver = {
+ .probe = exynos_pcie_probe,
+- .remove = __exit_p(exynos_pcie_remove),
++ .remove = exynos_pcie_remove,
+ .driver = {
+ .name = "exynos-pcie",
+ .of_match_table = exynos_pcie_of_match,
+diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
+index 78818853af9e4..d2634dafb68e5 100644
+--- a/drivers/pci/controller/dwc/pci-keystone.c
++++ b/drivers/pci/controller/dwc/pci-keystone.c
+@@ -1101,7 +1101,7 @@ static const struct of_device_id ks_pcie_of_match[] = {
+ { },
+ };
+
+-static int __init ks_pcie_probe(struct platform_device *pdev)
++static int ks_pcie_probe(struct platform_device *pdev)
+ {
+ const struct dw_pcie_host_ops *host_ops;
+ const struct dw_pcie_ep_ops *ep_ops;
+@@ -1303,7 +1303,7 @@ err_link:
+ return ret;
+ }
+
+-static int __exit ks_pcie_remove(struct platform_device *pdev)
++static int ks_pcie_remove(struct platform_device *pdev)
+ {
+ struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
+ struct device_link **link = ks_pcie->link;
+@@ -1319,9 +1319,9 @@ static int __exit ks_pcie_remove(struct platform_device *pdev)
+ return 0;
+ }
+
+-static struct platform_driver ks_pcie_driver __refdata = {
++static struct platform_driver ks_pcie_driver = {
+ .probe = ks_pcie_probe,
+- .remove = __exit_p(ks_pcie_remove),
++ .remove = ks_pcie_remove,
+ .driver = {
+ .name = "keystone-pcie",
+ .of_match_table = ks_pcie_of_match,
+diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
+index 1f2ee71da4da2..8e6f6ac42dc96 100644
+--- a/drivers/pci/controller/dwc/pcie-designware.c
++++ b/drivers/pci/controller/dwc/pcie-designware.c
+@@ -732,6 +732,53 @@ static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
+
+ }
+
++static void dw_pcie_link_set_max_link_width(struct dw_pcie *pci, u32 num_lanes)
++{
++ u32 lnkcap, lwsc, plc;
++ u8 cap;
++
++ if (!num_lanes)
++ return;
++
++ /* Set the number of lanes */
++ plc = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
++ plc &= ~PORT_LINK_FAST_LINK_MODE;
++ plc &= ~PORT_LINK_MODE_MASK;
++
++ /* Set link width speed control register */
++ lwsc = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
++ lwsc &= ~PORT_LOGIC_LINK_WIDTH_MASK;
++ switch (num_lanes) {
++ case 1:
++ plc |= PORT_LINK_MODE_1_LANES;
++ lwsc |= PORT_LOGIC_LINK_WIDTH_1_LANES;
++ break;
++ case 2:
++ plc |= PORT_LINK_MODE_2_LANES;
++ lwsc |= PORT_LOGIC_LINK_WIDTH_2_LANES;
++ break;
++ case 4:
++ plc |= PORT_LINK_MODE_4_LANES;
++ lwsc |= PORT_LOGIC_LINK_WIDTH_4_LANES;
++ break;
++ case 8:
++ plc |= PORT_LINK_MODE_8_LANES;
++ lwsc |= PORT_LOGIC_LINK_WIDTH_8_LANES;
++ break;
++ default:
++ dev_err(pci->dev, "num-lanes %u: invalid value\n", num_lanes);
++ return;
++ }
++ dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, plc);
++ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, lwsc);
++
++ cap = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
++ lnkcap = dw_pcie_readl_dbi(pci, cap + PCI_EXP_LNKCAP);
++ lnkcap &= ~PCI_EXP_LNKCAP_MLW;
++ lnkcap |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, num_lanes);
++ dw_pcie_writel_dbi(pci, cap + PCI_EXP_LNKCAP, lnkcap);
++}
++
+ void dw_pcie_iatu_detect(struct dw_pcie *pci)
+ {
+ int max_region, ob, ib;
+@@ -1013,49 +1060,5 @@ void dw_pcie_setup(struct dw_pcie *pci)
+ val |= PORT_LINK_DLL_LINK_EN;
+ dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
+
+- if (!pci->num_lanes) {
+- dev_dbg(pci->dev, "Using h/w default number of lanes\n");
+- return;
+- }
+-
+- /* Set the number of lanes */
+- val &= ~PORT_LINK_FAST_LINK_MODE;
+- val &= ~PORT_LINK_MODE_MASK;
+- switch (pci->num_lanes) {
+- case 1:
+- val |= PORT_LINK_MODE_1_LANES;
+- break;
+- case 2:
+- val |= PORT_LINK_MODE_2_LANES;
+- break;
+- case 4:
+- val |= PORT_LINK_MODE_4_LANES;
+- break;
+- case 8:
+- val |= PORT_LINK_MODE_8_LANES;
+- break;
+- default:
+- dev_err(pci->dev, "num-lanes %u: invalid value\n", pci->num_lanes);
+- return;
+- }
+- dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
+-
+- /* Set link width speed control register */
+- val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+- val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
+- switch (pci->num_lanes) {
+- case 1:
+- val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
+- break;
+- case 2:
+- val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
+- break;
+- case 4:
+- val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
+- break;
+- case 8:
+- val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
+- break;
+- }
+- dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
++ dw_pcie_link_set_max_link_width(pci, pci->num_lanes);
+ }
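
The designware rework hoists the lane-width programming into a helper and writes PCI_EXP_LNKCAP_MLW with FIELD_PREP() rather than an open-coded shift. A minimal sketch of the GENMASK()/FIELD_PREP()/FIELD_GET() trio (the mask is illustrative, though the LNKCAP MLW field does sit in bits 9:4):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define LNK_WIDTH_MASK  GENMASK(9, 4)

/* Both macros derive the shift from the mask at compile time, so the
 * field can move without touching the callers. */
static u32 set_width(u32 reg, u32 width)
{
        reg &= ~LNK_WIDTH_MASK;
        reg |= FIELD_PREP(LNK_WIDTH_MASK, width);
        return reg;
}

static u32 get_width(u32 reg)
{
        return FIELD_GET(LNK_WIDTH_MASK, reg);
}
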
+diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
+index d09507f822a7d..a824d8e8edb9d 100644
+--- a/drivers/pci/controller/dwc/pcie-kirin.c
++++ b/drivers/pci/controller/dwc/pcie-kirin.c
+@@ -742,7 +742,7 @@ err:
+ return ret;
+ }
+
+-static int __exit kirin_pcie_remove(struct platform_device *pdev)
++static int kirin_pcie_remove(struct platform_device *pdev)
+ {
+ struct kirin_pcie *kirin_pcie = platform_get_drvdata(pdev);
+
+@@ -819,7 +819,7 @@ static int kirin_pcie_probe(struct platform_device *pdev)
+
+ static struct platform_driver kirin_pcie_driver = {
+ .probe = kirin_pcie_probe,
+- .remove = __exit_p(kirin_pcie_remove),
++ .remove = kirin_pcie_remove,
+ .driver = {
+ .name = "kirin-pcie",
+ .of_match_table = kirin_pcie_match,
+diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
+index 267e1247d548f..4a9741428619f 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
++++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
+@@ -121,6 +121,7 @@
+
+ /* ELBI registers */
+ #define ELBI_SYS_STTS 0x08
++#define ELBI_CS2_ENABLE 0xa4
+
+ /* DBI registers */
+ #define DBI_CON_STATUS 0x44
+@@ -253,6 +254,21 @@ static void qcom_pcie_dw_stop_link(struct dw_pcie *pci)
+ disable_irq(pcie_ep->perst_irq);
+ }
+
++static void qcom_pcie_dw_write_dbi2(struct dw_pcie *pci, void __iomem *base,
++ u32 reg, size_t size, u32 val)
++{
++ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
++ int ret;
++
++ writel(1, pcie_ep->elbi + ELBI_CS2_ENABLE);
++
++ ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
++ if (ret)
++ dev_err(pci->dev, "Failed to write DBI2 register (0x%x): %d\n", reg, ret);
++
++ writel(0, pcie_ep->elbi + ELBI_CS2_ENABLE);
++}
++
+ static int qcom_pcie_enable_resources(struct qcom_pcie_ep *pcie_ep)
+ {
+ int ret;
+@@ -451,6 +467,7 @@ static const struct dw_pcie_ops pci_ops = {
+ .link_up = qcom_pcie_dw_link_up,
+ .start_link = qcom_pcie_dw_start_link,
+ .stop_link = qcom_pcie_dw_stop_link,
++ .write_dbi2 = qcom_pcie_dw_write_dbi2,
+ };
+
+ static int qcom_pcie_ep_get_io_resources(struct platform_device *pdev,
+diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
+index ccff8cde5cff6..07cb0818a5138 100644
+--- a/drivers/pci/controller/dwc/pcie-tegra194.c
++++ b/drivers/pci/controller/dwc/pcie-tegra194.c
+@@ -9,6 +9,7 @@
+ * Author: Vidya Sagar <vidyas@nvidia.com>
+ */
+
++#include <linux/bitfield.h>
+ #include <linux/clk.h>
+ #include <linux/debugfs.h>
+ #include <linux/delay.h>
+@@ -347,8 +348,7 @@ static void apply_bad_link_workaround(struct dw_pcie_rp *pp)
+ */
+ val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
+ if (val & PCI_EXP_LNKSTA_LBMS) {
+- current_link_width = (val & PCI_EXP_LNKSTA_NLW) >>
+- PCI_EXP_LNKSTA_NLW_SHIFT;
++ current_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);
+ if (pcie->init_link_width > current_link_width) {
+ dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
+ val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+@@ -761,8 +761,7 @@ static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp)
+
+ val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA);
+- pcie->init_link_width = (val_w & PCI_EXP_LNKSTA_NLW) >>
+- PCI_EXP_LNKSTA_NLW_SHIFT;
++ pcie->init_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val_w);
+
+ val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKCTL);
+@@ -921,7 +920,7 @@ static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
+ /* Configure Max lane width from DT */
+ val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_MLW;
+- val |= (pcie->num_lanes << PCI_EXP_LNKSTA_NLW_SHIFT);
++ val |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, pcie->num_lanes);
+ dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val);
+
+ /* Clear Slot Clock Configuration bit if SRNS configuration */
+diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
+index c931b1b07b1d8..0cacd58f6c05e 100644
+--- a/drivers/pci/controller/pci-mvebu.c
++++ b/drivers/pci/controller/pci-mvebu.c
+@@ -265,7 +265,7 @@ static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
+ */
+ lnkcap = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);
+ lnkcap &= ~PCI_EXP_LNKCAP_MLW;
+- lnkcap |= (port->is_x4 ? 4 : 1) << 4;
++ lnkcap |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, port->is_x4 ? 4 : 1);
+ mvebu_writel(port, lnkcap, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);
+
+ /* Disable Root Bridge I/O space, memory space and bus mastering. */
+diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
+index a05350a4e49cb..05b7357bd2586 100644
+--- a/drivers/pci/pci-acpi.c
++++ b/drivers/pci/pci-acpi.c
+@@ -911,7 +911,7 @@ pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
+ {
+ int acpi_state, d_max;
+
+- if (pdev->no_d3cold)
++ if (pdev->no_d3cold || !pdev->d3cold_allowed)
+ d_max = ACPI_STATE_D3_HOT;
+ else
+ d_max = ACPI_STATE_D3_COLD;
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index ab32a91f287b4..e1e53d1b88a46 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -12,7 +12,7 @@
+ * Modeled after usb's driverfs.c
+ */
+
+-
++#include <linux/bitfield.h>
+ #include <linux/kernel.h>
+ #include <linux/sched.h>
+ #include <linux/pci.h>
+@@ -230,8 +230,7 @@ static ssize_t current_link_width_show(struct device *dev,
+ if (err)
+ return -EINVAL;
+
+- return sysfs_emit(buf, "%u\n",
+- (linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);
++ return sysfs_emit(buf, "%u\n", FIELD_GET(PCI_EXP_LNKSTA_NLW, linkstat));
+ }
+ static DEVICE_ATTR_RO(current_link_width);
+
+@@ -530,10 +529,7 @@ static ssize_t d3cold_allowed_store(struct device *dev,
+ return -EINVAL;
+
+ pdev->d3cold_allowed = !!val;
+- if (pdev->d3cold_allowed)
+- pci_d3cold_enable(pdev);
+- else
+- pci_d3cold_disable(pdev);
++ pci_bridge_d3_update(pdev);
+
+ pm_runtime_resume(dev);
+
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 702fe577089b4..a7793abdd74ee 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -732,15 +732,18 @@ u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap)
+ {
+ u16 vsec = 0;
+ u32 header;
++ int ret;
+
+ if (vendor != dev->vendor)
+ return 0;
+
+ while ((vsec = pci_find_next_ext_capability(dev, vsec,
+ PCI_EXT_CAP_ID_VNDR))) {
+- if (pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER,
+- &header) == PCIBIOS_SUCCESSFUL &&
+- PCI_VNDR_HEADER_ID(header) == cap)
++ ret = pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);
++ if (ret != PCIBIOS_SUCCESSFUL)
++ continue;
++
++ if (PCI_VNDR_HEADER_ID(header) == cap)
+ return vsec;
+ }
+
+@@ -3743,14 +3746,14 @@ u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
+ return 0;
+
+ pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
+- cap &= PCI_REBAR_CAP_SIZES;
++ cap = FIELD_GET(PCI_REBAR_CAP_SIZES, cap);
+
+ /* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
+ if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
+- bar == 0 && cap == 0x7000)
+- cap = 0x3f000;
++ bar == 0 && cap == 0x700)
++ return 0x3f00;
+
+- return cap >> 4;
++ return cap;
+ }
+ EXPORT_SYMBOL(pci_rebar_get_possible_sizes);
+
+@@ -6252,8 +6255,7 @@ u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
+ pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
+
+ next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
+- next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
+- PCI_EXP_LNKSTA_NLW_SHIFT;
++ next_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
+
+ next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
+
+@@ -6325,7 +6327,7 @@ enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
+
+ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
+ if (lnkcap)
+- return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
++ return FIELD_GET(PCI_EXP_LNKCAP_MLW, lnkcap);
+
+ return PCIE_LNK_WIDTH_UNKNOWN;
+ }
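
In pci_rebar_get_possible_sizes() the FIELD_GET() conversion masks and shifts the sizes field up front, which is why the Sapphire quirk constants lose a nibble: the raw 0x7000 reads back as 0x700, and the returned override shrinks from 0x3f000 to 0x3f00. Checking the arithmetic:

#include <stdio.h>
#include <stdint.h>

#define REBAR_CAP_SIZES 0x00fffff0u     /* bits 23:4, per PCI_REBAR_CAP */

int main(void)
{
        uint32_t raw = 0x7000;          /* the RX 5600 XT cap dword bits */

        /* Old flow masked first and shifted at return; new flow does
         * both at extraction, so comparisons see pre-shifted values. */
        uint32_t extracted = (raw & REBAR_CAP_SIZES) >> 4;

        printf("0x%x -> 0x%x\n", raw, extracted);  /* 0x7000 -> 0x700 */
        return 0;
}
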
+diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
+index f6c24ded134cd..67025ee2b7454 100644
+--- a/drivers/pci/pcie/aer.c
++++ b/drivers/pci/pcie/aer.c
+@@ -29,6 +29,7 @@
+ #include <linux/kfifo.h>
+ #include <linux/slab.h>
+ #include <acpi/apei.h>
++#include <acpi/ghes.h>
+ #include <ras/ras_event.h>
+
+ #include "../pci.h"
+@@ -1010,6 +1011,15 @@ static void aer_recover_work_func(struct work_struct *work)
+ continue;
+ }
+ cper_print_aer(pdev, entry.severity, entry.regs);
++ /*
++ * Memory for aer_capability_regs(entry.regs) is being allocated from the
++ * ghes_estatus_pool to protect it from overwriting when multiple sections
++ * are present in the error status. Thus free the same after processing
++ * the data.
++ */
++ ghes_estatus_pool_region_free((unsigned long)entry.regs,
++ sizeof(struct aer_capability_regs));
++
+ if (entry.severity == AER_NONFATAL)
+ pcie_do_recovery(pdev, pci_channel_io_normal,
+ aer_root_reset);
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index 530c3bb5708c5..fc18e42f0a6ed 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -1248,6 +1248,8 @@ static ssize_t aspm_attr_store_common(struct device *dev,
+ link->aspm_disable &= ~ASPM_STATE_L1;
+ } else {
+ link->aspm_disable |= state;
++ if (state & ASPM_STATE_L1)
++ link->aspm_disable |= ASPM_STATE_L1SS;
+ }
+
+ pcie_config_aspm_link(link, policy_to_aspm_state(link));
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 24a83cf5ace8c..cd08d39fdb1ff 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -1653,15 +1653,15 @@ static void pci_set_removable(struct pci_dev *dev)
+ static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
+ {
+ #ifdef CONFIG_PCI_QUIRKS
+- int pos;
++ int pos, ret;
+ u32 header, tmp;
+
+ pci_read_config_dword(dev, PCI_VENDOR_ID, &header);
+
+ for (pos = PCI_CFG_SPACE_SIZE;
+ pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
+- if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
+- || header != tmp)
++ ret = pci_read_config_dword(dev, pos, &tmp);
++ if ((ret != PCIBIOS_SUCCESSFUL) || (header != tmp))
+ return false;
+ }
+
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index eb65170b97ff0..d78c75fedf112 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -5383,7 +5383,7 @@ int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
+ */
+ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
+ {
+- int pos, i = 0;
++ int pos, i = 0, ret;
+ u8 next_cap;
+ u16 reg16, *cap;
+ struct pci_cap_saved_state *state;
+@@ -5429,8 +5429,8 @@ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
+ pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
+
+ pdev->cfg_size = PCI_CFG_SPACE_EXP_SIZE;
+- if (pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &status) !=
+- PCIBIOS_SUCCESSFUL || (status == 0xffffffff))
++ ret = pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &status);
++ if ((ret != PCIBIOS_SUCCESSFUL) || (PCI_POSSIBLE_ERROR(status)))
+ pdev->cfg_size = PCI_CFG_SPACE_SIZE;
+
+ if (pci_find_saved_cap(pdev, PCI_CAP_ID_EXP))
+@@ -5507,6 +5507,12 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0420, quirk_no_ext_tags);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
+
+ #ifdef CONFIG_PCI_ATS
++static void quirk_no_ats(struct pci_dev *pdev)
++{
++ pci_info(pdev, "disabling ATS\n");
++ pdev->ats_cap = 0;
++}
++
+ /*
+ * Some devices require additional driver setup to enable ATS. Don't use
+ * ATS for those devices as ATS will be enabled before the driver has had a
+@@ -5520,14 +5526,10 @@ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
+ (pdev->subsystem_device == 0xce19 ||
+ pdev->subsystem_device == 0xcc10 ||
+ pdev->subsystem_device == 0xcc08))
+- goto no_ats;
+- else
+- return;
++ quirk_no_ats(pdev);
++ } else {
++ quirk_no_ats(pdev);
+ }
+-
+-no_ats:
+- pci_info(pdev, "disabling ATS\n");
+- pdev->ats_cap = 0;
+ }
+
+ /* AMD Stoney platform GPU */
+@@ -5550,6 +5552,25 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7347, quirk_amd_harvest_no_ats);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x734f, quirk_amd_harvest_no_ats);
+ /* AMD Raven platform iGPU */
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x15d8, quirk_amd_harvest_no_ats);
++
++/*
++ * Intel IPU E2000 revisions before C0 implement incorrect endianness
++ * in ATS Invalidate Request message body. Disable ATS for those devices.
++ */
++static void quirk_intel_e2000_no_ats(struct pci_dev *pdev)
++{
++ if (pdev->revision < 0x20)
++ quirk_no_ats(pdev);
++}
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1451, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1452, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1453, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1454, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1455, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1457, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1459, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x145a, quirk_intel_e2000_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x145c, quirk_intel_e2000_no_ats);
+ #endif /* CONFIG_PCI_ATS */
+
+ /* Freescale PCIe doesn't support MSI in RC mode */
+@@ -6140,3 +6161,15 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2d, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2f, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size);
+ #endif
++
++/*
++ * Devices known to require a longer delay before first config space access
++ * after reset recovery or resume from D3cold:
++ *
++ * VideoPropulsion (aka Genroco) Torrent QN16e MPEG QAM Modulator
++ */
++static void pci_fixup_d3cold_delay_1sec(struct pci_dev *pdev)
++{
++ pdev->d3cold_delay = 1000;
++}
++DECLARE_PCI_FIXUP_FINAL(0x5555, 0x0004, pci_fixup_d3cold_delay_1sec);
+diff --git a/drivers/perf/arm_cspmu/arm_cspmu.c b/drivers/perf/arm_cspmu/arm_cspmu.c
+index e2b7827c45635..9363c31f31b89 100644
+--- a/drivers/perf/arm_cspmu/arm_cspmu.c
++++ b/drivers/perf/arm_cspmu/arm_cspmu.c
+@@ -635,6 +635,9 @@ static int arm_cspmu_event_init(struct perf_event *event)
+
+ cspmu = to_arm_cspmu(event->pmu);
+
++ if (event->attr.type != event->pmu->type)
++ return -ENOENT;
++
+ /*
+ * Following other "uncore" PMUs, we do not support sampling mode or
+ * attach to a task (per-process mode).
+diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
+index 4163ff5174715..6225239b64550 100644
+--- a/drivers/perf/riscv_pmu_sbi.c
++++ b/drivers/perf/riscv_pmu_sbi.c
+@@ -629,6 +629,11 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
+
+ /* Firmware counter don't support overflow yet */
+ fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS);
++ if (fidx == RISCV_MAX_COUNTERS) {
++ csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
++ return IRQ_NONE;
++ }
++
+ event = cpu_hw_evt->events[fidx];
+ if (!event) {
+ csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
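
The riscv_pmu_sbi.c guard is needed because find_first_bit() signals "no bit set" by returning the bitmap size rather than an error code, and that sentinel was previously used directly as an array index. A sketch of checking the sentinel (array types invented):

#include <linux/bitmap.h>

#define NCTRS 64                        /* illustrative counter count */

static void *first_used(const unsigned long *used, void **items)
{
        unsigned int idx = find_first_bit(used, NCTRS);

        if (idx == NCTRS)               /* empty mask: nothing to index */
                return NULL;

        return items[idx];              /* idx is now provably in range */
}
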
+diff --git a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
+index 90f8543ba265b..6777532dd4dc9 100644
+--- a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
++++ b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
+@@ -25,23 +25,73 @@
+ #define EUSB2_FORCE_VAL_5 0xeD
+ #define V_CLK_19P2M_EN BIT(6)
+
++#define EUSB2_TUNE_USB2_CROSSOVER 0x50
+ #define EUSB2_TUNE_IUSB2 0x51
++#define EUSB2_TUNE_RES_FSDIF 0x52
++#define EUSB2_TUNE_HSDISC 0x53
+ #define EUSB2_TUNE_SQUELCH_U 0x54
++#define EUSB2_TUNE_USB2_SLEW 0x55
++#define EUSB2_TUNE_USB2_EQU 0x56
+ #define EUSB2_TUNE_USB2_PREEM 0x57
++#define EUSB2_TUNE_USB2_HS_COMP_CUR 0x58
++#define EUSB2_TUNE_EUSB_SLEW 0x59
++#define EUSB2_TUNE_EUSB_EQU 0x5A
++#define EUSB2_TUNE_EUSB_HS_COMP_CUR 0x5B
+
+-#define QCOM_EUSB2_REPEATER_INIT_CFG(o, v) \
++#define QCOM_EUSB2_REPEATER_INIT_CFG(r, v) \
+ { \
+- .offset = o, \
++ .reg = r, \
+ .val = v, \
+ }
+
+-struct eusb2_repeater_init_tbl {
+- unsigned int offset;
+- unsigned int val;
++enum reg_fields {
++ F_TUNE_EUSB_HS_COMP_CUR,
++ F_TUNE_EUSB_EQU,
++ F_TUNE_EUSB_SLEW,
++ F_TUNE_USB2_HS_COMP_CUR,
++ F_TUNE_USB2_PREEM,
++ F_TUNE_USB2_EQU,
++ F_TUNE_USB2_SLEW,
++ F_TUNE_SQUELCH_U,
++ F_TUNE_HSDISC,
++ F_TUNE_RES_FSDIF,
++ F_TUNE_IUSB2,
++ F_TUNE_USB2_CROSSOVER,
++ F_NUM_TUNE_FIELDS,
++
++ F_FORCE_VAL_5 = F_NUM_TUNE_FIELDS,
++ F_FORCE_EN_5,
++
++ F_EN_CTL1,
++
++ F_RPTR_STATUS,
++ F_NUM_FIELDS,
++};
++
++static struct reg_field eusb2_repeater_tune_reg_fields[F_NUM_FIELDS] = {
++ [F_TUNE_EUSB_HS_COMP_CUR] = REG_FIELD(EUSB2_TUNE_EUSB_HS_COMP_CUR, 0, 1),
++ [F_TUNE_EUSB_EQU] = REG_FIELD(EUSB2_TUNE_EUSB_EQU, 0, 1),
++ [F_TUNE_EUSB_SLEW] = REG_FIELD(EUSB2_TUNE_EUSB_SLEW, 0, 1),
++ [F_TUNE_USB2_HS_COMP_CUR] = REG_FIELD(EUSB2_TUNE_USB2_HS_COMP_CUR, 0, 1),
++ [F_TUNE_USB2_PREEM] = REG_FIELD(EUSB2_TUNE_USB2_PREEM, 0, 2),
++ [F_TUNE_USB2_EQU] = REG_FIELD(EUSB2_TUNE_USB2_EQU, 0, 1),
++ [F_TUNE_USB2_SLEW] = REG_FIELD(EUSB2_TUNE_USB2_SLEW, 0, 1),
++ [F_TUNE_SQUELCH_U] = REG_FIELD(EUSB2_TUNE_SQUELCH_U, 0, 2),
++ [F_TUNE_HSDISC] = REG_FIELD(EUSB2_TUNE_HSDISC, 0, 2),
++ [F_TUNE_RES_FSDIF] = REG_FIELD(EUSB2_TUNE_RES_FSDIF, 0, 2),
++ [F_TUNE_IUSB2] = REG_FIELD(EUSB2_TUNE_IUSB2, 0, 3),
++ [F_TUNE_USB2_CROSSOVER] = REG_FIELD(EUSB2_TUNE_USB2_CROSSOVER, 0, 2),
++
++ [F_FORCE_VAL_5] = REG_FIELD(EUSB2_FORCE_VAL_5, 0, 7),
++ [F_FORCE_EN_5] = REG_FIELD(EUSB2_FORCE_EN_5, 0, 7),
++
++ [F_EN_CTL1] = REG_FIELD(EUSB2_EN_CTL1, 0, 7),
++
++ [F_RPTR_STATUS] = REG_FIELD(EUSB2_RPTR_STATUS, 0, 7),
+ };
+
+ struct eusb2_repeater_cfg {
+- const struct eusb2_repeater_init_tbl *init_tbl;
++ const u32 *init_tbl;
+ int init_tbl_num;
+ const char * const *vreg_list;
+ int num_vregs;
+@@ -49,11 +99,10 @@ struct eusb2_repeater_cfg {
+
+ struct eusb2_repeater {
+ struct device *dev;
+- struct regmap *regmap;
++ struct regmap_field *regs[F_NUM_FIELDS];
+ struct phy *phy;
+ struct regulator_bulk_data *vregs;
+ const struct eusb2_repeater_cfg *cfg;
+- u16 base;
+ enum phy_mode mode;
+ };
+
+@@ -61,10 +110,10 @@ static const char * const pm8550b_vreg_l[] = {
+ "vdd18", "vdd3",
+ };
+
+-static const struct eusb2_repeater_init_tbl pm8550b_init_tbl[] = {
+- QCOM_EUSB2_REPEATER_INIT_CFG(EUSB2_TUNE_IUSB2, 0x8),
+- QCOM_EUSB2_REPEATER_INIT_CFG(EUSB2_TUNE_SQUELCH_U, 0x3),
+- QCOM_EUSB2_REPEATER_INIT_CFG(EUSB2_TUNE_USB2_PREEM, 0x5),
++static const u32 pm8550b_init_tbl[F_NUM_TUNE_FIELDS] = {
++ [F_TUNE_IUSB2] = 0x8,
++ [F_TUNE_SQUELCH_U] = 0x3,
++ [F_TUNE_USB2_PREEM] = 0x5,
+ };
+
+ static const struct eusb2_repeater_cfg pm8550b_eusb2_cfg = {
+@@ -92,9 +141,9 @@ static int eusb2_repeater_init_vregs(struct eusb2_repeater *rptr)
+
+ static int eusb2_repeater_init(struct phy *phy)
+ {
++ struct reg_field *regfields = eusb2_repeater_tune_reg_fields;
+ struct eusb2_repeater *rptr = phy_get_drvdata(phy);
+- const struct eusb2_repeater_init_tbl *init_tbl = rptr->cfg->init_tbl;
+- int num = rptr->cfg->init_tbl_num;
++ const u32 *init_tbl = rptr->cfg->init_tbl;
+ u32 val;
+ int ret;
+ int i;
+@@ -103,17 +152,21 @@ static int eusb2_repeater_init(struct phy *phy)
+ if (ret)
+ return ret;
+
+- regmap_update_bits(rptr->regmap, rptr->base + EUSB2_EN_CTL1,
+- EUSB2_RPTR_EN, EUSB2_RPTR_EN);
++ regmap_field_update_bits(rptr->regs[F_EN_CTL1], EUSB2_RPTR_EN, EUSB2_RPTR_EN);
+
+- for (i = 0; i < num; i++)
+- regmap_update_bits(rptr->regmap,
+- rptr->base + init_tbl[i].offset,
+- init_tbl[i].val, init_tbl[i].val);
++ for (i = 0; i < F_NUM_TUNE_FIELDS; i++) {
++ if (init_tbl[i]) {
++ regmap_field_update_bits(rptr->regs[i], init_tbl[i], init_tbl[i]);
++ } else {
++ /* Write 0 if there's no value set */
++ u32 mask = GENMASK(regfields[i].msb, regfields[i].lsb);
++
++ regmap_field_update_bits(rptr->regs[i], mask, 0);
++ }
++ }
+
+- ret = regmap_read_poll_timeout(rptr->regmap,
+- rptr->base + EUSB2_RPTR_STATUS, val,
+- val & RPTR_OK, 10, 5);
++ ret = regmap_field_read_poll_timeout(rptr->regs[F_RPTR_STATUS],
++ val, val & RPTR_OK, 10, 5);
+ if (ret)
+ dev_err(rptr->dev, "initialization timed-out\n");
+
+@@ -132,10 +185,10 @@ static int eusb2_repeater_set_mode(struct phy *phy,
+ * per eUSB 1.2 Spec. Below implement software workaround until
+ * PHY and controller is fixing seen observation.
+ */
+- regmap_update_bits(rptr->regmap, rptr->base + EUSB2_FORCE_EN_5,
+- F_CLK_19P2M_EN, F_CLK_19P2M_EN);
+- regmap_update_bits(rptr->regmap, rptr->base + EUSB2_FORCE_VAL_5,
+- V_CLK_19P2M_EN, V_CLK_19P2M_EN);
++ regmap_field_update_bits(rptr->regs[F_FORCE_EN_5],
++ F_CLK_19P2M_EN, F_CLK_19P2M_EN);
++ regmap_field_update_bits(rptr->regs[F_FORCE_VAL_5],
++ V_CLK_19P2M_EN, V_CLK_19P2M_EN);
+ break;
+ case PHY_MODE_USB_DEVICE:
+ /*
+@@ -144,10 +197,10 @@ static int eusb2_repeater_set_mode(struct phy *phy,
+ * repeater doesn't clear previous value due to shared
+ * regulators (say host <-> device mode switch).
+ */
+- regmap_update_bits(rptr->regmap, rptr->base + EUSB2_FORCE_EN_5,
+- F_CLK_19P2M_EN, 0);
+- regmap_update_bits(rptr->regmap, rptr->base + EUSB2_FORCE_VAL_5,
+- V_CLK_19P2M_EN, 0);
++ regmap_field_update_bits(rptr->regs[F_FORCE_EN_5],
++ F_CLK_19P2M_EN, 0);
++ regmap_field_update_bits(rptr->regs[F_FORCE_VAL_5],
++ V_CLK_19P2M_EN, 0);
+ break;
+ default:
+ return -EINVAL;
+@@ -176,8 +229,9 @@ static int eusb2_repeater_probe(struct platform_device *pdev)
+ struct device *dev = &pdev->dev;
+ struct phy_provider *phy_provider;
+ struct device_node *np = dev->of_node;
++ struct regmap *regmap;
++ int i, ret;
+ u32 res;
+- int ret;
+
+ rptr = devm_kzalloc(dev, sizeof(*rptr), GFP_KERNEL);
+ if (!rptr)
+@@ -190,15 +244,22 @@ static int eusb2_repeater_probe(struct platform_device *pdev)
+ if (!rptr->cfg)
+ return -EINVAL;
+
+- rptr->regmap = dev_get_regmap(dev->parent, NULL);
+- if (!rptr->regmap)
++ regmap = dev_get_regmap(dev->parent, NULL);
++ if (!regmap)
+ return -ENODEV;
+
+ ret = of_property_read_u32(np, "reg", &res);
+ if (ret < 0)
+ return ret;
+
+- rptr->base = res;
++ for (i = 0; i < F_NUM_FIELDS; i++)
++ eusb2_repeater_tune_reg_fields[i].reg += res;
++
++ ret = devm_regmap_field_bulk_alloc(dev, regmap, rptr->regs,
++ eusb2_repeater_tune_reg_fields,
++ F_NUM_FIELDS);
++ if (ret)
++ return ret;
+
+ ret = eusb2_repeater_init_vregs(rptr);
+ if (ret < 0) {
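
The eusb2-repeater rewrite trades (base + offset, mask) bookkeeping for regmap_field accessors: each field is declared once as REG_FIELD(reg, lsb, msb), allocated in bulk, and then addressed by name. A condensed sketch of the API shape (register numbers and values invented):

#include <linux/regmap.h>

enum { F_TUNE, F_ENABLE, F_NUM };

static const struct reg_field example_fields[F_NUM] = {
        [F_TUNE]   = REG_FIELD(0x53, 0, 2),     /* bits 2:0 of reg 0x53 */
        [F_ENABLE] = REG_FIELD(0x46, 0, 7),
};

static int example_init(struct device *dev, struct regmap *map,
                        struct regmap_field *regs[F_NUM])
{
        int ret;

        ret = devm_regmap_field_bulk_alloc(dev, map, regs,
                                           example_fields, F_NUM);
        if (ret)
                return ret;

        /* Mask and shift come from the REG_FIELD() definition. */
        return regmap_field_write(regs[F_TUNE], 0x5);
}
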
+diff --git a/drivers/platform/chrome/cros_ec_proto_test.c b/drivers/platform/chrome/cros_ec_proto_test.c
+index 5b9748e0463bc..63e38671e95a6 100644
+--- a/drivers/platform/chrome/cros_ec_proto_test.c
++++ b/drivers/platform/chrome/cros_ec_proto_test.c
+@@ -2668,6 +2668,7 @@ static int cros_ec_proto_test_init(struct kunit *test)
+ ec_dev->dev->release = cros_ec_proto_test_release;
+ ec_dev->cmd_xfer = cros_kunit_ec_xfer_mock;
+ ec_dev->pkt_xfer = cros_kunit_ec_xfer_mock;
++ mutex_init(&ec_dev->lock);
+
+ priv->msg = (struct cros_ec_command *)priv->_msg;
+
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index ad460417f901a..4b13d3e704bf3 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -9810,6 +9810,7 @@ static const struct tpacpi_quirk battery_quirk_table[] __initconst = {
+ * Individual addressing is broken on models that expose the
+ * primary battery as BAT1.
+ */
++ TPACPI_Q_LNV('8', 'F', true), /* Thinkpad X120e */
+ TPACPI_Q_LNV('J', '7', true), /* B5400 */
+ TPACPI_Q_LNV('J', 'I', true), /* Thinkpad 11e */
+ TPACPI_Q_LNV3('R', '0', 'B', true), /* Thinkpad 11e gen 3 */
+diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
+index e618ed5aa8caa..fd37831dc98f8 100644
+--- a/drivers/powercap/intel_rapl_common.c
++++ b/drivers/powercap/intel_rapl_common.c
+@@ -892,7 +892,7 @@ static int rapl_write_pl_data(struct rapl_domain *rd, int pl,
+ return -EINVAL;
+
+ if (rd->rpl[pl].locked) {
+- pr_warn("%s:%s:%s locked by BIOS\n", rd->rp->name, rd->name, pl_names[pl]);
++ pr_debug("%s:%s:%s locked by BIOS\n", rd->rp->name, rd->name, pl_names[pl]);
+ return -EACCES;
+ }
+
+diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
+index 362bf756e6b78..5a3a4cc0bec82 100644
+--- a/drivers/ptp/ptp_chardev.c
++++ b/drivers/ptp/ptp_chardev.c
+@@ -490,7 +490,8 @@ ssize_t ptp_read(struct posix_clock *pc,
+
+ for (i = 0; i < cnt; i++) {
+ event[i] = queue->buf[queue->head];
+- queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
++ /* Paired with READ_ONCE() in queue_cnt() */
++ WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
+ }
+
+ spin_unlock_irqrestore(&queue->lock, flags);
+diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
+index 80f74e38c2da4..9a50bfb56453c 100644
+--- a/drivers/ptp/ptp_clock.c
++++ b/drivers/ptp/ptp_clock.c
+@@ -56,10 +56,11 @@ static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
+ dst->t.sec = seconds;
+ dst->t.nsec = remainder;
+
++ /* Both WRITE_ONCE() are paired with READ_ONCE() in queue_cnt() */
+ if (!queue_free(queue))
+- queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
++ WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
+
+- queue->tail = (queue->tail + 1) % PTP_MAX_TIMESTAMPS;
++ WRITE_ONCE(queue->tail, (queue->tail + 1) % PTP_MAX_TIMESTAMPS);
+
+ spin_unlock_irqrestore(&queue->lock, flags);
+ }
+diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
+index 75f58fc468a71..b8d4f61f14be4 100644
+--- a/drivers/ptp/ptp_private.h
++++ b/drivers/ptp/ptp_private.h
+@@ -76,9 +76,13 @@ struct ptp_vclock {
+ * that a writer might concurrently increment the tail does not
+ * matter, since the queue remains nonempty nonetheless.
+ */
+-static inline int queue_cnt(struct timestamp_event_queue *q)
++static inline int queue_cnt(const struct timestamp_event_queue *q)
+ {
+- int cnt = q->tail - q->head;
++ /*
++ * Paired with WRITE_ONCE() in enqueue_external_timestamp(),
++ * ptp_read(), extts_fifo_show().
++ */
++ int cnt = READ_ONCE(q->tail) - READ_ONCE(q->head);
+ return cnt < 0 ? PTP_MAX_TIMESTAMPS + cnt : cnt;
+ }
+
+diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
+index 6e4d5456a8851..34ea5c16123a1 100644
+--- a/drivers/ptp/ptp_sysfs.c
++++ b/drivers/ptp/ptp_sysfs.c
+@@ -90,7 +90,8 @@ static ssize_t extts_fifo_show(struct device *dev,
+ qcnt = queue_cnt(queue);
+ if (qcnt) {
+ event = queue->buf[queue->head];
+- queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
++ /* Paired with READ_ONCE() in queue_cnt() */
++ WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
+ }
+ spin_unlock_irqrestore(&queue->lock, flags);
+
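
The four PTP hunks pair the lockless head/tail reads in queue_cnt() with WRITE_ONCE()/READ_ONCE() on every writer, so the compiler cannot tear, fuse, or re-load the indices of the timestamp queue. A stripped-down sketch of the single-producer/single-consumer idiom:

#include <linux/compiler.h>

#define RING_SZ 128                     /* illustrative; one slot unused */

struct ring {
        int head;                       /* consumer-owned */
        int tail;                       /* producer-owned */
};

/* May run without the queue lock: marked accesses keep each index
 * read as a single load the other side can rely on. */
static int ring_cnt(const struct ring *r)
{
        int cnt = READ_ONCE(r->tail) - READ_ONCE(r->head);

        return cnt < 0 ? RING_SZ + cnt : cnt;
}

static void ring_pop(struct ring *r)    /* consumer side */
{
        WRITE_ONCE(r->head, (r->head + 1) % RING_SZ);
}
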
+diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
+index 8028f76923b24..ea3c19156c25e 100644
+--- a/drivers/s390/crypto/ap_bus.c
++++ b/drivers/s390/crypto/ap_bus.c
+@@ -1030,6 +1030,10 @@ EXPORT_SYMBOL(ap_driver_unregister);
+
+ void ap_bus_force_rescan(void)
+ {
++ /* Only trigger AP bus scans after the initial scan is done */
++ if (atomic64_read(&ap_scan_bus_count) <= 0)
++ return;
++
+ /* processing a asynchronous bus rescan */
+ del_timer(&ap_config_timer);
+ queue_work(system_long_wq, &ap_scan_work);
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+index 2f33e6b4a92fb..9285ae508afa6 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+@@ -4861,6 +4861,12 @@ static void debugfs_bist_init_v3_hw(struct hisi_hba *hisi_hba)
+ hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS;
+ }
+
++static void debugfs_exit_v3_hw(struct hisi_hba *hisi_hba)
++{
++ debugfs_remove_recursive(hisi_hba->debugfs_dir);
++ hisi_hba->debugfs_dir = NULL;
++}
++
+ static void debugfs_init_v3_hw(struct hisi_hba *hisi_hba)
+ {
+ struct device *dev = hisi_hba->dev;
+@@ -4884,18 +4890,13 @@ static void debugfs_init_v3_hw(struct hisi_hba *hisi_hba)
+
+ for (i = 0; i < hisi_sas_debugfs_dump_count; i++) {
+ if (debugfs_alloc_v3_hw(hisi_hba, i)) {
+- debugfs_remove_recursive(hisi_hba->debugfs_dir);
++ debugfs_exit_v3_hw(hisi_hba);
+ dev_dbg(dev, "failed to init debugfs!\n");
+ break;
+ }
+ }
+ }
+
+-static void debugfs_exit_v3_hw(struct hisi_hba *hisi_hba)
+-{
+- debugfs_remove_recursive(hisi_hba->debugfs_dir);
+-}
+-
+ static int
+ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ {
+diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
+index 470e8e6c41b62..c98346e464b48 100644
+--- a/drivers/scsi/ibmvscsi/ibmvfc.c
++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
+@@ -1518,7 +1518,11 @@ static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_queue *queue)
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->l_lock, flags);
+- BUG_ON(list_empty(&queue->free));
++ if (list_empty(&queue->free)) {
++ ibmvfc_log(queue->vhost, 4, "empty event pool on queue:%ld\n", queue->hwq_id);
++ spin_unlock_irqrestore(&queue->l_lock, flags);
++ return NULL;
++ }
+ evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
+ atomic_set(&evt->free, 0);
+ list_del(&evt->queue_list);
+@@ -1947,9 +1951,15 @@ static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
+ if (vhost->using_channels) {
+ scsi_channel = hwq % vhost->scsi_scrqs.active_queues;
+ evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[scsi_channel]);
++ if (!evt)
++ return SCSI_MLQUEUE_HOST_BUSY;
++
+ evt->hwq = hwq % vhost->scsi_scrqs.active_queues;
+- } else
++ } else {
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt)
++ return SCSI_MLQUEUE_HOST_BUSY;
++ }
+
+ ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
+ evt->cmnd = cmnd;
+@@ -2037,6 +2047,11 @@ static int ibmvfc_bsg_timeout(struct bsg_job *job)
+
+ vhost->aborting_passthru = 1;
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ spin_unlock_irqrestore(vhost->host->host_lock, flags);
++ return -ENOMEM;
++ }
++
+ ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);
+
+ tmf = &evt->iu.tmf;
+@@ -2095,6 +2110,10 @@ static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
+ goto unlock_out;
+
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ rc = -ENOMEM;
++ goto unlock_out;
++ }
+ ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
+ plogi = &evt->iu.plogi;
+ memset(plogi, 0, sizeof(*plogi));
+@@ -2213,6 +2232,11 @@ static int ibmvfc_bsg_request(struct bsg_job *job)
+ }
+
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ spin_unlock_irqrestore(vhost->host->host_lock, flags);
++ rc = -ENOMEM;
++ goto out;
++ }
+ ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
+ mad = &evt->iu.passthru;
+
+@@ -2301,6 +2325,11 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
+ else
+ evt = ibmvfc_get_event(&vhost->crq);
+
++ if (!evt) {
++ spin_unlock_irqrestore(vhost->host->host_lock, flags);
++ return -ENOMEM;
++ }
++
+ ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
+ tmf = ibmvfc_init_vfc_cmd(evt, sdev);
+ iu = ibmvfc_get_fcp_iu(vhost, tmf);
+@@ -2504,6 +2533,8 @@ static struct ibmvfc_event *ibmvfc_init_tmf(struct ibmvfc_queue *queue,
+ struct ibmvfc_tmf *tmf;
+
+ evt = ibmvfc_get_event(queue);
++ if (!evt)
++ return NULL;
+ ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
+
+ tmf = &evt->iu.tmf;
+@@ -2560,6 +2591,11 @@ static int ibmvfc_cancel_all_mq(struct scsi_device *sdev, int type)
+
+ if (found_evt && vhost->logged_in) {
+ evt = ibmvfc_init_tmf(&queues[i], sdev, type);
++ if (!evt) {
++ spin_unlock(queues[i].q_lock);
++ spin_unlock_irqrestore(vhost->host->host_lock, flags);
++ return -ENOMEM;
++ }
+ evt->sync_iu = &queues[i].cancel_rsp;
+ ibmvfc_send_event(evt, vhost, default_timeout);
+ list_add_tail(&evt->cancel, &cancelq);
+@@ -2773,6 +2809,10 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
+
+ if (vhost->state == IBMVFC_ACTIVE) {
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ spin_unlock_irqrestore(vhost->host->host_lock, flags);
++ return -ENOMEM;
++ }
+ ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
+ tmf = ibmvfc_init_vfc_cmd(evt, sdev);
+ iu = ibmvfc_get_fcp_iu(vhost, tmf);
+@@ -4031,6 +4071,12 @@ static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
+
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
++ kref_put(&tgt->kref, ibmvfc_release_tgt);
++ __ibmvfc_reset_host(vhost);
++ return;
++ }
+ vhost->discovery_threads++;
+ ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
+ evt->tgt = tgt;
+@@ -4138,6 +4184,12 @@ static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
+ kref_get(&tgt->kref);
+ tgt->logo_rcvd = 0;
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
++ kref_put(&tgt->kref, ibmvfc_release_tgt);
++ __ibmvfc_reset_host(vhost);
++ return;
++ }
+ vhost->discovery_threads++;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+ ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
+@@ -4214,6 +4266,8 @@ static struct ibmvfc_event *__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_t
+
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt)
++ return NULL;
+ ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT);
+ evt->tgt = tgt;
+ mad = &evt->iu.implicit_logout;
+@@ -4241,6 +4295,13 @@ static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
+ vhost->discovery_threads++;
+ evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
+ ibmvfc_tgt_implicit_logout_done);
++ if (!evt) {
++ vhost->discovery_threads--;
++ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
++ kref_put(&tgt->kref, ibmvfc_release_tgt);
++ __ibmvfc_reset_host(vhost);
++ return;
++ }
+
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+ if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+@@ -4380,6 +4441,12 @@ static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
+
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
++ kref_put(&tgt->kref, ibmvfc_release_tgt);
++ __ibmvfc_reset_host(vhost);
++ return;
++ }
+ vhost->discovery_threads++;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+ ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT);
+@@ -4546,6 +4613,14 @@ static void ibmvfc_adisc_timeout(struct timer_list *t)
+ vhost->abort_threads++;
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ tgt_err(tgt, "Failed to get cancel event for ADISC.\n");
++ vhost->abort_threads--;
++ kref_put(&tgt->kref, ibmvfc_release_tgt);
++ __ibmvfc_reset_host(vhost);
++ spin_unlock_irqrestore(vhost->host->host_lock, flags);
++ return;
++ }
+ ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);
+
+ evt->tgt = tgt;
+@@ -4596,6 +4671,12 @@ static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
+
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
++ kref_put(&tgt->kref, ibmvfc_release_tgt);
++ __ibmvfc_reset_host(vhost);
++ return;
++ }
+ vhost->discovery_threads++;
+ ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
+ evt->tgt = tgt;
+@@ -4699,6 +4780,12 @@ static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
+
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
++ kref_put(&tgt->kref, ibmvfc_release_tgt);
++ __ibmvfc_reset_host(vhost);
++ return;
++ }
+ vhost->discovery_threads++;
+ evt->tgt = tgt;
+ ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
+@@ -4871,6 +4958,13 @@ static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
+ {
+ struct ibmvfc_discover_targets *mad;
+ struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
++ int level = IBMVFC_DEFAULT_LOG_LEVEL;
++
++ if (!evt) {
++ ibmvfc_log(vhost, level, "Discover Targets failed: no available events\n");
++ ibmvfc_hard_reset_host(vhost);
++ return;
++ }
+
+ ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
+ mad = &evt->iu.discover_targets;
+@@ -4948,8 +5042,15 @@ static void ibmvfc_channel_setup(struct ibmvfc_host *vhost)
+ struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs;
+ unsigned int num_channels =
+ min(vhost->client_scsi_channels, vhost->max_vios_scsi_channels);
++ int level = IBMVFC_DEFAULT_LOG_LEVEL;
+ int i;
+
++ if (!evt) {
++ ibmvfc_log(vhost, level, "Channel Setup failed: no available events\n");
++ ibmvfc_hard_reset_host(vhost);
++ return;
++ }
++
+ memset(setup_buf, 0, sizeof(*setup_buf));
+ if (num_channels == 0)
+ setup_buf->flags = cpu_to_be32(IBMVFC_CANCEL_CHANNELS);
+@@ -5011,6 +5112,13 @@ static void ibmvfc_channel_enquiry(struct ibmvfc_host *vhost)
+ {
+ struct ibmvfc_channel_enquiry *mad;
+ struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
++ int level = IBMVFC_DEFAULT_LOG_LEVEL;
++
++ if (!evt) {
++ ibmvfc_log(vhost, level, "Channel Enquiry failed: no available events\n");
++ ibmvfc_hard_reset_host(vhost);
++ return;
++ }
+
+ ibmvfc_init_event(evt, ibmvfc_channel_enquiry_done, IBMVFC_MAD_FORMAT);
+ mad = &evt->iu.channel_enquiry;
+@@ -5133,6 +5241,12 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
+ struct ibmvfc_npiv_login_mad *mad;
+ struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
+
++ if (!evt) {
++ ibmvfc_dbg(vhost, "NPIV Login failed: no available events\n");
++ ibmvfc_hard_reset_host(vhost);
++ return;
++ }
++
+ ibmvfc_gather_partition_info(vhost);
+ ibmvfc_set_login_info(vhost);
+ ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
+@@ -5197,6 +5311,12 @@ static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
+ struct ibmvfc_event *evt;
+
+ evt = ibmvfc_get_event(&vhost->crq);
++ if (!evt) {
++ ibmvfc_dbg(vhost, "NPIV Logout failed: no available events\n");
++ ibmvfc_hard_reset_host(vhost);
++ return;
++ }
++
+ ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
+
+ mad = &evt->iu.npiv_logout;
+diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
+index 9c02c9523c4d4..ab06e9aeb613e 100644
+--- a/drivers/scsi/libfc/fc_lport.c
++++ b/drivers/scsi/libfc/fc_lport.c
+@@ -241,6 +241,12 @@ static void fc_lport_ptp_setup(struct fc_lport *lport,
+ }
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->ptp_rdata = fc_rport_create(lport, remote_fid);
++ if (!lport->ptp_rdata) {
++ printk(KERN_WARNING "libfc: Failed to setup lport 0x%x\n",
++ lport->port_id);
++ mutex_unlock(&lport->disc.disc_mutex);
++ return;
++ }
+ kref_get(&lport->ptp_rdata->kref);
+ lport->ptp_rdata->ids.port_name = remote_wwpn;
+ lport->ptp_rdata->ids.node_name = remote_wwnn;
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index c0d47141f6d38..2a3279b902d60 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -263,13 +263,13 @@ u32 megasas_readl(struct megasas_instance *instance,
+ * Fusion registers could intermittently return all zeroes.
+ * This behavior is transient in nature and subsequent reads will
+ * return valid value. As a workaround in driver, retry readl for
+- * upto three times until a non-zero value is read.
++ * up to thirty times until a non-zero value is read.
+ */
+ if (instance->adapter_type == AERO_SERIES) {
+ do {
+ ret_val = readl(addr);
+ i++;
+- } while (ret_val == 0 && i < 3);
++ } while (ret_val == 0 && i < 30);
+ return ret_val;
+ } else {
+ return readl(addr);
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
+index 5284584e4cd2b..2fa56ef7f6594 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -223,8 +223,8 @@ _base_readl_ext_retry(const volatile void __iomem *addr)
+
+ for (i = 0 ; i < 30 ; i++) {
+ ret_val = readl(addr);
+- if (ret_val == 0)
+- continue;
++ if (ret_val != 0)
++ break;
+ }
+
+ return ret_val;
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 67176be79dffd..6d6b4ed49612d 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -1835,8 +1835,16 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
+ }
+
+ spin_lock_irqsave(qp->qp_lock_ptr, *flags);
+- if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd)))
+- sp->done(sp, res);
++ switch (sp->type) {
++ case SRB_SCSI_CMD:
++ if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd)))
++ sp->done(sp, res);
++ break;
++ default:
++ if (ret_cmd)
++ sp->done(sp, res);
++ break;
++ }
+ } else {
+ sp->done(sp, res);
+ }
+diff --git a/drivers/soc/amlogic/meson-ee-pwrc.c b/drivers/soc/amlogic/meson-ee-pwrc.c
+index f54acffc83f9f..f2b24361c8cac 100644
+--- a/drivers/soc/amlogic/meson-ee-pwrc.c
++++ b/drivers/soc/amlogic/meson-ee-pwrc.c
+@@ -229,7 +229,7 @@ static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_audio[] = {
+
+ static struct meson_ee_pwrc_mem_domain g12a_pwrc_mem_nna[] = {
+ { G12A_HHI_NANOQ_MEM_PD_REG0, GENMASK(31, 0) },
+- { G12A_HHI_NANOQ_MEM_PD_REG1, GENMASK(23, 0) },
++ { G12A_HHI_NANOQ_MEM_PD_REG1, GENMASK(31, 0) },
+ };
+
+ #define VPU_PD(__name, __top_pd, __mem, __is_pwr_off, __resets, __clks) \
+diff --git a/drivers/soc/bcm/bcm2835-power.c b/drivers/soc/bcm/bcm2835-power.c
+index 1a179d4e011cf..d2f0233cb6206 100644
+--- a/drivers/soc/bcm/bcm2835-power.c
++++ b/drivers/soc/bcm/bcm2835-power.c
+@@ -175,7 +175,7 @@ static int bcm2835_asb_control(struct bcm2835_power *power, u32 reg, bool enable
+ }
+ writel(PM_PASSWORD | val, base + reg);
+
+- while (readl(base + reg) & ASB_ACK) {
++ while (!!(readl(base + reg) & ASB_ACK) == enable) {
+ cpu_relax();
+ if (ktime_get_ns() - start >= 1000)
+ return -ETIMEDOUT;
+diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c
+index 90a8b2c0676ff..419ed15cc10c4 100644
+--- a/drivers/soc/imx/gpc.c
++++ b/drivers/soc/imx/gpc.c
+@@ -498,6 +498,7 @@ static int imx_gpc_probe(struct platform_device *pdev)
+
+ pd_pdev->dev.parent = &pdev->dev;
+ pd_pdev->dev.of_node = np;
++ pd_pdev->dev.fwnode = of_fwnode_handle(np);
+
+ ret = platform_device_add(pd_pdev);
+ if (ret) {
+diff --git a/drivers/soundwire/dmi-quirks.c b/drivers/soundwire/dmi-quirks.c
+index 2a1096dab63d3..9ebdd0cd0b1cf 100644
+--- a/drivers/soundwire/dmi-quirks.c
++++ b/drivers/soundwire/dmi-quirks.c
+@@ -141,7 +141,7 @@ static const struct dmi_system_id adr_remap_quirk_table[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "OMEN by HP Gaming Laptop 16-k0xxx"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "OMEN by HP Gaming Laptop 16"),
+ },
+ .driver_data = (void *)hp_omen_16,
+ },
+diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
+index 36243a3972fd7..5ac5cb60bae67 100644
+--- a/drivers/thermal/intel/intel_powerclamp.c
++++ b/drivers/thermal/intel/intel_powerclamp.c
+@@ -256,7 +256,7 @@ skip_limit_set:
+
+ static const struct kernel_param_ops max_idle_ops = {
+ .set = max_idle_set,
+- .get = param_get_int,
++ .get = param_get_byte,
+ };
+
+ module_param_cb(max_idle, &max_idle_ops, &max_idle, 0644);
+diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c
+index 488138a28ae13..e6bfa63b40aee 100644
+--- a/drivers/thunderbolt/quirks.c
++++ b/drivers/thunderbolt/quirks.c
+@@ -31,6 +31,9 @@ static void quirk_usb3_maximum_bandwidth(struct tb_switch *sw)
+ {
+ struct tb_port *port;
+
++ if (tb_switch_is_icm(sw))
++ return;
++
+ tb_switch_for_each_port(sw, port) {
+ if (!tb_port_is_usb3_down(port))
+ continue;
+diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
+index 98764e740c078..34c01874f45be 100644
+--- a/drivers/tty/hvc/hvc_xen.c
++++ b/drivers/tty/hvc/hvc_xen.c
+@@ -377,18 +377,21 @@ void xen_console_resume(void)
+ #ifdef CONFIG_HVC_XEN_FRONTEND
+ static void xencons_disconnect_backend(struct xencons_info *info)
+ {
+- if (info->irq > 0)
+- unbind_from_irqhandler(info->irq, NULL);
+- info->irq = 0;
++ if (info->hvc != NULL)
++ hvc_remove(info->hvc);
++ info->hvc = NULL;
++ if (info->irq > 0) {
++ evtchn_put(info->evtchn);
++ info->irq = 0;
++ info->evtchn = 0;
++ }
++ /* evtchn_put() will also close it so this is only an error path */
+ if (info->evtchn > 0)
+ xenbus_free_evtchn(info->xbdev, info->evtchn);
+ info->evtchn = 0;
+ if (info->gntref > 0)
+ gnttab_free_grant_references(info->gntref);
+ info->gntref = 0;
+- if (info->hvc != NULL)
+- hvc_remove(info->hvc);
+- info->hvc = NULL;
+ }
+
+ static void xencons_free(struct xencons_info *info)
+@@ -433,7 +436,7 @@ static int xencons_connect_backend(struct xenbus_device *dev,
+ if (ret)
+ return ret;
+ info->evtchn = evtchn;
+- irq = bind_interdomain_evtchn_to_irq_lateeoi(dev, evtchn);
++ irq = bind_evtchn_to_irq_lateeoi(evtchn);
+ if (irq < 0)
+ return irq;
+ info->irq = irq;
+@@ -553,10 +556,23 @@ static void xencons_backend_changed(struct xenbus_device *dev,
+ if (dev->state == XenbusStateClosed)
+ break;
+ fallthrough; /* Missed the backend's CLOSING state */
+- case XenbusStateClosing:
++ case XenbusStateClosing: {
++		struct xencons_info *info = dev_get_drvdata(&dev->dev);
++
++ /*
++ * Don't tear down the evtchn and grant ref before the other
++ * end has disconnected, but do stop userspace from trying
++ * to use the device before we allow the backend to close.
++ */
++ if (info->hvc) {
++ hvc_remove(info->hvc);
++ info->hvc = NULL;
++ }
++
+ xenbus_frontend_closed(dev);
+ break;
+ }
++ }
+ }
+
+ static const struct xenbus_device_id xencons_ids[] = {
+@@ -588,7 +604,7 @@ static int __init xen_hvc_init(void)
+ ops = &dom0_hvc_ops;
+ r = xen_initial_domain_console_init();
+ if (r < 0)
+- return r;
++ goto register_fe;
+ info = vtermno_to_xencons(HVC_COOKIE);
+ } else {
+ ops = &domU_hvc_ops;
+@@ -597,7 +613,7 @@ static int __init xen_hvc_init(void)
+ else
+ r = xen_pv_console_init();
+ if (r < 0)
+- return r;
++ goto register_fe;
+
+ info = vtermno_to_xencons(HVC_COOKIE);
+ info->irq = bind_evtchn_to_irq_lateeoi(info->evtchn);
+@@ -616,12 +632,13 @@ static int __init xen_hvc_init(void)
+ list_del(&info->list);
+ spin_unlock_irqrestore(&xencons_lock, flags);
+ if (info->irq)
+- unbind_from_irqhandler(info->irq, NULL);
++ evtchn_put(info->evtchn);
+ kfree(info);
+ return r;
+ }
+
+ r = 0;
++ register_fe:
+ #ifdef CONFIG_HVC_XEN_FRONTEND
+ r = xenbus_register_frontend(&xencons_driver);
+ #endif
+diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
+index 2501db5a7aaf7..677584cab57e4 100644
+--- a/drivers/tty/serial/meson_uart.c
++++ b/drivers/tty/serial/meson_uart.c
+@@ -379,10 +379,14 @@ static void meson_uart_set_termios(struct uart_port *port,
+ else
+ val |= AML_UART_STOP_BIT_1SB;
+
+- if (cflags & CRTSCTS)
+- val &= ~AML_UART_TWO_WIRE_EN;
+- else
++ if (cflags & CRTSCTS) {
++ if (port->flags & UPF_HARD_FLOW)
++ val &= ~AML_UART_TWO_WIRE_EN;
++ else
++ termios->c_cflag &= ~CRTSCTS;
++ } else {
+ val |= AML_UART_TWO_WIRE_EN;
++ }
+
+ writel(val, port->membase + AML_UART_CONTROL);
+
+@@ -697,6 +701,7 @@ static int meson_uart_probe(struct platform_device *pdev)
+ u32 fifosize = 64; /* Default is 64, 128 for EE UART_0 */
+ int ret = 0;
+ int irq;
++ bool has_rtscts;
+
+ if (pdev->dev.of_node)
+ pdev->id = of_alias_get_id(pdev->dev.of_node, "serial");
+@@ -724,6 +729,7 @@ static int meson_uart_probe(struct platform_device *pdev)
+ return irq;
+
+ of_property_read_u32(pdev->dev.of_node, "fifo-size", &fifosize);
++ has_rtscts = of_property_read_bool(pdev->dev.of_node, "uart-has-rtscts");
+
+ if (meson_ports[pdev->id]) {
+ dev_err(&pdev->dev, "port %d already allocated\n", pdev->id);
+@@ -743,6 +749,8 @@ static int meson_uart_probe(struct platform_device *pdev)
+ port->mapsize = resource_size(res_mem);
+ port->irq = irq;
+ port->flags = UPF_BOOT_AUTOCONF | UPF_LOW_LATENCY;
++ if (has_rtscts)
++ port->flags |= UPF_HARD_FLOW;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MESON_CONSOLE);
+ port->dev = &pdev->dev;
+ port->line = pdev->id;
+diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
+index b6e70c5cfa174..88f594d369487 100644
+--- a/drivers/tty/sysrq.c
++++ b/drivers/tty/sysrq.c
+@@ -263,13 +263,14 @@ static void sysrq_handle_showallcpus(int key)
+ if (in_hardirq())
+ regs = get_irq_regs();
+
+- pr_info("CPU%d:\n", smp_processor_id());
++ pr_info("CPU%d:\n", get_cpu());
+ if (regs)
+ show_regs(regs);
+ else
+ show_stack(NULL, NULL, KERN_INFO);
+
+ schedule_work(&sysrq_showallcpus);
++ put_cpu();
+ }
+ }
+
+diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c
+index 34ba6e54789a7..b8b832c75b856 100644
+--- a/drivers/tty/vcc.c
++++ b/drivers/tty/vcc.c
+@@ -579,18 +579,22 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+ return -ENOMEM;
+
+ name = kstrdup(dev_name(&vdev->dev), GFP_KERNEL);
++ if (!name) {
++ rv = -ENOMEM;
++ goto free_port;
++ }
+
+ rv = vio_driver_init(&port->vio, vdev, VDEV_CONSOLE_CON, vcc_versions,
+ ARRAY_SIZE(vcc_versions), NULL, name);
+ if (rv)
+- goto free_port;
++ goto free_name;
+
+ port->vio.debug = vcc_dbg_vio;
+ vcc_ldc_cfg.debug = vcc_dbg_ldc;
+
+ rv = vio_ldc_alloc(&port->vio, &vcc_ldc_cfg, port);
+ if (rv)
+- goto free_port;
++ goto free_name;
+
+ spin_lock_init(&port->lock);
+
+@@ -624,6 +628,11 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+ goto unreg_tty;
+ }
+ port->domain = kstrdup(domain, GFP_KERNEL);
++ if (!port->domain) {
++ rv = -ENOMEM;
++ goto unreg_tty;
++ }
++
+
+ mdesc_release(hp);
+
+@@ -653,8 +662,9 @@ free_table:
+ vcc_table_remove(port->index);
+ free_ldc:
+ vio_ldc_free(&port->vio);
+-free_port:
++free_name:
+ kfree(name);
++free_port:
+ kfree(port);
+
+ return rv;
+diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
+index 386674ead7f0d..08ff0dd73f1ef 100644
+--- a/drivers/ufs/core/ufs-mcq.c
++++ b/drivers/ufs/core/ufs-mcq.c
+@@ -433,7 +433,7 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
+
+ for (i = 0; i < hba->nr_hw_queues; i++) {
+ hwq = &hba->uhq[i];
+- hwq->max_entries = hba->nutrs;
++ hwq->max_entries = hba->nutrs + 1;
+ spin_lock_init(&hwq->sq_lock);
+ spin_lock_init(&hwq->cq_lock);
+ mutex_init(&hwq->sq_mutex);
+@@ -632,6 +632,7 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
+ int tag = scsi_cmd_to_rq(cmd)->tag;
+ struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ struct ufs_hw_queue *hwq;
++ unsigned long flags;
+ int err = FAILED;
+
+ if (!ufshcd_cmd_inflight(lrbp->cmd)) {
+@@ -672,8 +673,10 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
+ }
+
+ err = SUCCESS;
++ spin_lock_irqsave(&hwq->cq_lock, flags);
+ if (ufshcd_cmd_inflight(lrbp->cmd))
+ ufshcd_release_scsi_cmd(hba, lrbp);
++ spin_unlock_irqrestore(&hwq->cq_lock, flags);
+
+ out:
+ return err;
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 13cd0f1207bf1..dbc3bfa98863a 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -8798,7 +8798,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
+ if (ret)
+ goto out;
+
+- if (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) {
++ if (!hba->pm_op_in_progress &&
++ (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) {
+ /* Reset the device and controller before doing reinit */
+ ufshcd_device_reset(hba);
+ ufshcd_hba_stop(hba);
+diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
+index c1557d21b027e..1748ead49b05f 100644
+--- a/drivers/ufs/host/ufs-qcom.c
++++ b/drivers/ufs/host/ufs-qcom.c
+@@ -820,8 +820,13 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
+ return ret;
+ }
+
+- /* Use the agreed gear */
+- host->hs_gear = dev_req_params->gear_tx;
++ /*
++	 * Update hs_gear only when the gears are scaled to a higher value. This is because
++ * the PHY gear settings are backwards compatible and we only need to change the PHY
++ * settings while scaling to higher gears.
++ */
++ if (dev_req_params->gear_tx > host->hs_gear)
++ host->hs_gear = dev_req_params->gear_tx;
+
+ /* enable the device ref clock before changing to HS mode */
+ if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 343d2570189ff..d25490965b27f 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -1094,6 +1094,111 @@ static void dwc3_set_power_down_clk_scale(struct dwc3 *dwc)
+ }
+ }
+
++static void dwc3_config_threshold(struct dwc3 *dwc)
++{
++ u32 reg;
++ u8 rx_thr_num;
++ u8 rx_maxburst;
++ u8 tx_thr_num;
++ u8 tx_maxburst;
++
++ /*
++ * Must config both number of packets and max burst settings to enable
++ * RX and/or TX threshold.
++ */
++ if (!DWC3_IP_IS(DWC3) && dwc->dr_mode == USB_DR_MODE_HOST) {
++ rx_thr_num = dwc->rx_thr_num_pkt_prd;
++ rx_maxburst = dwc->rx_max_burst_prd;
++ tx_thr_num = dwc->tx_thr_num_pkt_prd;
++ tx_maxburst = dwc->tx_max_burst_prd;
++
++ if (rx_thr_num && rx_maxburst) {
++ reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
++ reg |= DWC31_RXTHRNUMPKTSEL_PRD;
++
++ reg &= ~DWC31_RXTHRNUMPKT_PRD(~0);
++ reg |= DWC31_RXTHRNUMPKT_PRD(rx_thr_num);
++
++ reg &= ~DWC31_MAXRXBURSTSIZE_PRD(~0);
++ reg |= DWC31_MAXRXBURSTSIZE_PRD(rx_maxburst);
++
++ dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
++ }
++
++ if (tx_thr_num && tx_maxburst) {
++ reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
++ reg |= DWC31_TXTHRNUMPKTSEL_PRD;
++
++ reg &= ~DWC31_TXTHRNUMPKT_PRD(~0);
++ reg |= DWC31_TXTHRNUMPKT_PRD(tx_thr_num);
++
++ reg &= ~DWC31_MAXTXBURSTSIZE_PRD(~0);
++ reg |= DWC31_MAXTXBURSTSIZE_PRD(tx_maxburst);
++
++ dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
++ }
++ }
++
++ rx_thr_num = dwc->rx_thr_num_pkt;
++ rx_maxburst = dwc->rx_max_burst;
++ tx_thr_num = dwc->tx_thr_num_pkt;
++ tx_maxburst = dwc->tx_max_burst;
++
++ if (DWC3_IP_IS(DWC3)) {
++ if (rx_thr_num && rx_maxburst) {
++ reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
++ reg |= DWC3_GRXTHRCFG_PKTCNTSEL;
++
++ reg &= ~DWC3_GRXTHRCFG_RXPKTCNT(~0);
++ reg |= DWC3_GRXTHRCFG_RXPKTCNT(rx_thr_num);
++
++ reg &= ~DWC3_GRXTHRCFG_MAXRXBURSTSIZE(~0);
++ reg |= DWC3_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst);
++
++ dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
++ }
++
++ if (tx_thr_num && tx_maxburst) {
++ reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
++ reg |= DWC3_GTXTHRCFG_PKTCNTSEL;
++
++ reg &= ~DWC3_GTXTHRCFG_TXPKTCNT(~0);
++ reg |= DWC3_GTXTHRCFG_TXPKTCNT(tx_thr_num);
++
++ reg &= ~DWC3_GTXTHRCFG_MAXTXBURSTSIZE(~0);
++ reg |= DWC3_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst);
++
++ dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
++ }
++ } else {
++ if (rx_thr_num && rx_maxburst) {
++ reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
++ reg |= DWC31_GRXTHRCFG_PKTCNTSEL;
++
++ reg &= ~DWC31_GRXTHRCFG_RXPKTCNT(~0);
++ reg |= DWC31_GRXTHRCFG_RXPKTCNT(rx_thr_num);
++
++ reg &= ~DWC31_GRXTHRCFG_MAXRXBURSTSIZE(~0);
++ reg |= DWC31_GRXTHRCFG_MAXRXBURSTSIZE(rx_maxburst);
++
++ dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
++ }
++
++ if (tx_thr_num && tx_maxburst) {
++ reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
++ reg |= DWC31_GTXTHRCFG_PKTCNTSEL;
++
++ reg &= ~DWC31_GTXTHRCFG_TXPKTCNT(~0);
++ reg |= DWC31_GTXTHRCFG_TXPKTCNT(tx_thr_num);
++
++ reg &= ~DWC31_GTXTHRCFG_MAXTXBURSTSIZE(~0);
++ reg |= DWC31_GTXTHRCFG_MAXTXBURSTSIZE(tx_maxburst);
++
++ dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
++ }
++ }
++}
++
+ /**
+ * dwc3_core_init - Low-level initialization of DWC3 Core
+ * @dwc: Pointer to our controller context structure
+@@ -1246,42 +1351,7 @@ static int dwc3_core_init(struct dwc3 *dwc)
+ dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
+ }
+
+- /*
+- * Must config both number of packets and max burst settings to enable
+- * RX and/or TX threshold.
+- */
+- if (!DWC3_IP_IS(DWC3) && dwc->dr_mode == USB_DR_MODE_HOST) {
+- u8 rx_thr_num = dwc->rx_thr_num_pkt_prd;
+- u8 rx_maxburst = dwc->rx_max_burst_prd;
+- u8 tx_thr_num = dwc->tx_thr_num_pkt_prd;
+- u8 tx_maxburst = dwc->tx_max_burst_prd;
+-
+- if (rx_thr_num && rx_maxburst) {
+- reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
+- reg |= DWC31_RXTHRNUMPKTSEL_PRD;
+-
+- reg &= ~DWC31_RXTHRNUMPKT_PRD(~0);
+- reg |= DWC31_RXTHRNUMPKT_PRD(rx_thr_num);
+-
+- reg &= ~DWC31_MAXRXBURSTSIZE_PRD(~0);
+- reg |= DWC31_MAXRXBURSTSIZE_PRD(rx_maxburst);
+-
+- dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
+- }
+-
+- if (tx_thr_num && tx_maxburst) {
+- reg = dwc3_readl(dwc->regs, DWC3_GTXTHRCFG);
+- reg |= DWC31_TXTHRNUMPKTSEL_PRD;
+-
+- reg &= ~DWC31_TXTHRNUMPKT_PRD(~0);
+- reg |= DWC31_TXTHRNUMPKT_PRD(tx_thr_num);
+-
+- reg &= ~DWC31_MAXTXBURSTSIZE_PRD(~0);
+- reg |= DWC31_MAXTXBURSTSIZE_PRD(tx_maxburst);
+-
+- dwc3_writel(dwc->regs, DWC3_GTXTHRCFG, reg);
+- }
+- }
++ dwc3_config_threshold(dwc);
+
+ return 0;
+
+@@ -1417,6 +1487,10 @@ static void dwc3_get_properties(struct dwc3 *dwc)
+ u8 lpm_nyet_threshold;
+ u8 tx_de_emphasis;
+ u8 hird_threshold;
++ u8 rx_thr_num_pkt = 0;
++ u8 rx_max_burst = 0;
++ u8 tx_thr_num_pkt = 0;
++ u8 tx_max_burst = 0;
+ u8 rx_thr_num_pkt_prd = 0;
+ u8 rx_max_burst_prd = 0;
+ u8 tx_thr_num_pkt_prd = 0;
+@@ -1479,6 +1553,14 @@ static void dwc3_get_properties(struct dwc3 *dwc)
+ "snps,usb2-lpm-disable");
+ dwc->usb2_gadget_lpm_disable = device_property_read_bool(dev,
+ "snps,usb2-gadget-lpm-disable");
++ device_property_read_u8(dev, "snps,rx-thr-num-pkt",
++ &rx_thr_num_pkt);
++ device_property_read_u8(dev, "snps,rx-max-burst",
++ &rx_max_burst);
++ device_property_read_u8(dev, "snps,tx-thr-num-pkt",
++ &tx_thr_num_pkt);
++ device_property_read_u8(dev, "snps,tx-max-burst",
++ &tx_max_burst);
+ device_property_read_u8(dev, "snps,rx-thr-num-pkt-prd",
+ &rx_thr_num_pkt_prd);
+ device_property_read_u8(dev, "snps,rx-max-burst-prd",
+@@ -1560,6 +1642,12 @@ static void dwc3_get_properties(struct dwc3 *dwc)
+
+ dwc->hird_threshold = hird_threshold;
+
++ dwc->rx_thr_num_pkt = rx_thr_num_pkt;
++ dwc->rx_max_burst = rx_max_burst;
++
++ dwc->tx_thr_num_pkt = tx_thr_num_pkt;
++ dwc->tx_max_burst = tx_max_burst;
++
+ dwc->rx_thr_num_pkt_prd = rx_thr_num_pkt_prd;
+ dwc->rx_max_burst_prd = rx_max_burst_prd;
+
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index a69ac67d89fe6..6782ec8bfd64c 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -211,6 +211,11 @@
+ #define DWC3_GRXTHRCFG_RXPKTCNT(n) (((n) & 0xf) << 24)
+ #define DWC3_GRXTHRCFG_PKTCNTSEL BIT(29)
+
++/* Global TX Threshold Configuration Register */
++#define DWC3_GTXTHRCFG_MAXTXBURSTSIZE(n) (((n) & 0xff) << 16)
++#define DWC3_GTXTHRCFG_TXPKTCNT(n) (((n) & 0xf) << 24)
++#define DWC3_GTXTHRCFG_PKTCNTSEL BIT(29)
++
+ /* Global RX Threshold Configuration Register for DWC_usb31 only */
+ #define DWC31_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 16)
+ #define DWC31_GRXTHRCFG_RXPKTCNT(n) (((n) & 0x1f) << 21)
+@@ -1045,6 +1050,10 @@ struct dwc3_scratchpad_array {
+ * @test_mode_nr: test feature selector
+ * @lpm_nyet_threshold: LPM NYET response threshold
+ * @hird_threshold: HIRD threshold
++ * @rx_thr_num_pkt: USB receive packet count
++ * @rx_max_burst: max USB receive burst size
++ * @tx_thr_num_pkt: USB transmit packet count
++ * @tx_max_burst: max USB transmit burst size
+ * @rx_thr_num_pkt_prd: periodic ESS receive packet count
+ * @rx_max_burst_prd: max periodic ESS receive burst size
+ * @tx_thr_num_pkt_prd: periodic ESS transmit packet count
+@@ -1273,6 +1282,10 @@ struct dwc3 {
+ u8 test_mode_nr;
+ u8 lpm_nyet_threshold;
+ u8 hird_threshold;
++ u8 rx_thr_num_pkt;
++ u8 rx_max_burst;
++ u8 tx_thr_num_pkt;
++ u8 tx_max_burst;
+ u8 rx_thr_num_pkt_prd;
+ u8 rx_max_burst_prd;
+ u8 tx_thr_num_pkt_prd;
+diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
+index faf90a2174194..bbb6ff6b11aa1 100644
+--- a/drivers/usb/gadget/function/f_ncm.c
++++ b/drivers/usb/gadget/function/f_ncm.c
+@@ -1425,7 +1425,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
+ struct usb_composite_dev *cdev = c->cdev;
+ struct f_ncm *ncm = func_to_ncm(f);
+ struct usb_string *us;
+- int status;
++ int status = 0;
+ struct usb_ep *ep;
+ struct f_ncm_opts *ncm_opts;
+
+@@ -1443,22 +1443,17 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
+ f->os_desc_table[0].os_desc = &ncm_opts->ncm_os_desc;
+ }
+
+- /*
+- * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
+- * configurations are bound in sequence with list_for_each_entry,
+- * in each configuration its functions are bound in sequence
+- * with list_for_each_entry, so we assume no race condition
+- * with regard to ncm_opts->bound access
+- */
+- if (!ncm_opts->bound) {
+- mutex_lock(&ncm_opts->lock);
+- gether_set_gadget(ncm_opts->net, cdev->gadget);
++ mutex_lock(&ncm_opts->lock);
++ gether_set_gadget(ncm_opts->net, cdev->gadget);
++ if (!ncm_opts->bound)
+ status = gether_register_netdev(ncm_opts->net);
+- mutex_unlock(&ncm_opts->lock);
+- if (status)
+- goto fail;
+- ncm_opts->bound = true;
+- }
++ mutex_unlock(&ncm_opts->lock);
++
++ if (status)
++ goto fail;
++
++ ncm_opts->bound = true;
++
+ us = usb_gstrings_attach(cdev, ncm_strings,
+ ARRAY_SIZE(ncm_string_defs));
+ if (IS_ERR(us)) {
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index bde43cef8846c..95ed9404f6f85 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -695,7 +695,9 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
+ pm_runtime_put_noidle(&dev->dev);
+
+- if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
++ if (pci_choose_state(dev, PMSG_SUSPEND) == PCI_D0)
++ pm_runtime_forbid(&dev->dev);
++ else if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
+ pm_runtime_allow(&dev->dev);
+
+ dma_set_max_seg_size(&dev->dev, UINT_MAX);
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index fae994f679d45..82aab2f9adbb8 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -968,6 +968,7 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
+ int retval = 0;
+ bool comp_timer_running = false;
+ bool pending_portevent = false;
++ bool suspended_usb3_devs = false;
+ bool reinit_xhc = false;
+
+ if (!hcd->state)
+@@ -1115,10 +1116,17 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
+ /*
+ * Resume roothubs only if there are pending events.
+ * USB 3 devices resend U3 LFPS wake after a 100ms delay if
+- * the first wake signalling failed, give it that chance.
++ * the first wake signalling failed, give it that chance if
++ * there are suspended USB 3 devices.
+ */
++ if (xhci->usb3_rhub.bus_state.suspended_ports ||
++ xhci->usb3_rhub.bus_state.bus_suspended)
++ suspended_usb3_devs = true;
++
+ pending_portevent = xhci_pending_portevent(xhci);
+- if (!pending_portevent && msg.event == PM_EVENT_AUTO_RESUME) {
++
++ if (suspended_usb3_devs && !pending_portevent &&
++ msg.event == PM_EVENT_AUTO_RESUME) {
+ msleep(120);
+ pending_portevent = xhci_pending_portevent(xhci);
+ }
+diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
+index 1fe9cb5b6bd96..a2d862eebcecb 100644
+--- a/drivers/usb/typec/ucsi/ucsi_glink.c
++++ b/drivers/usb/typec/ucsi/ucsi_glink.c
+@@ -9,9 +9,13 @@
+ #include <linux/mutex.h>
+ #include <linux/property.h>
+ #include <linux/soc/qcom/pdr.h>
++#include <linux/usb/typec_mux.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/soc/qcom/pmic_glink.h>
+ #include "ucsi.h"
+
++#define PMIC_GLINK_MAX_PORTS 2
++
+ #define UCSI_BUF_SIZE 48
+
+ #define MSG_TYPE_REQ_RESP 1
+@@ -53,6 +57,9 @@ struct ucsi_notify_ind_msg {
+ struct pmic_glink_ucsi {
+ struct device *dev;
+
++ struct gpio_desc *port_orientation[PMIC_GLINK_MAX_PORTS];
++ struct typec_switch *port_switch[PMIC_GLINK_MAX_PORTS];
++
+ struct pmic_glink_client *client;
+
+ struct ucsi *ucsi;
+@@ -221,8 +228,20 @@ static void pmic_glink_ucsi_notify(struct work_struct *work)
+ }
+
+ con_num = UCSI_CCI_CONNECTOR(cci);
+- if (con_num)
++ if (con_num) {
++ if (con_num < PMIC_GLINK_MAX_PORTS &&
++ ucsi->port_orientation[con_num - 1]) {
++ int orientation = gpiod_get_value(ucsi->port_orientation[con_num - 1]);
++
++ if (orientation >= 0) {
++ typec_switch_set(ucsi->port_switch[con_num - 1],
++ orientation ? TYPEC_ORIENTATION_REVERSE
++ : TYPEC_ORIENTATION_NORMAL);
++ }
++ }
++
+ ucsi_connector_change(ucsi->ucsi, con_num);
++ }
+
+ if (ucsi->sync_pending && cci & UCSI_CCI_BUSY) {
+ ucsi->sync_val = -EBUSY;
+@@ -283,6 +302,7 @@ static int pmic_glink_ucsi_probe(struct auxiliary_device *adev,
+ {
+ struct pmic_glink_ucsi *ucsi;
+ struct device *dev = &adev->dev;
++ struct fwnode_handle *fwnode;
+ int ret;
+
+ ucsi = devm_kzalloc(dev, sizeof(*ucsi), GFP_KERNEL);
+@@ -310,6 +330,38 @@ static int pmic_glink_ucsi_probe(struct auxiliary_device *adev,
+
+ ucsi_set_drvdata(ucsi->ucsi, ucsi);
+
++ device_for_each_child_node(dev, fwnode) {
++ struct gpio_desc *desc;
++ u32 port;
++
++ ret = fwnode_property_read_u32(fwnode, "reg", &port);
++ if (ret < 0) {
++ dev_err(dev, "missing reg property of %pOFn\n", fwnode);
++ return ret;
++ }
++
++ if (port >= PMIC_GLINK_MAX_PORTS) {
++ dev_warn(dev, "invalid connector number, ignoring\n");
++ continue;
++ }
++
++ desc = devm_gpiod_get_index_optional(&adev->dev, "orientation", port, GPIOD_IN);
++
++ /* If GPIO isn't found, continue */
++ if (!desc)
++ continue;
++
++ if (IS_ERR(desc))
++ return dev_err_probe(dev, PTR_ERR(desc),
++ "unable to acquire orientation gpio\n");
++ ucsi->port_orientation[port] = desc;
++
++ ucsi->port_switch[port] = fwnode_typec_switch_get(fwnode);
++ if (IS_ERR(ucsi->port_switch[port]))
++ return dev_err_probe(dev, PTR_ERR(ucsi->port_switch[port]),
++ "failed to acquire orientation-switch\n");
++ }
++
+ ucsi->client = devm_pmic_glink_register_client(dev,
+ PMIC_GLINK_OWNER_USBC,
+ pmic_glink_ucsi_callback,
+diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
+index b3a3cb1657955..b137f36793439 100644
+--- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
++++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
+@@ -437,7 +437,7 @@ static int vdpasim_blk_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
+ if (blk->shared_backend) {
+ blk->buffer = shared_buffer;
+ } else {
+- blk->buffer = kvmalloc(VDPASIM_BLK_CAPACITY << SECTOR_SHIFT,
++ blk->buffer = kvzalloc(VDPASIM_BLK_CAPACITY << SECTOR_SHIFT,
+ GFP_KERNEL);
+ if (!blk->buffer) {
+ ret = -ENOMEM;
+@@ -495,7 +495,7 @@ static int __init vdpasim_blk_init(void)
+ goto parent_err;
+
+ if (shared_backend) {
+- shared_buffer = kvmalloc(VDPASIM_BLK_CAPACITY << SECTOR_SHIFT,
++ shared_buffer = kvzalloc(VDPASIM_BLK_CAPACITY << SECTOR_SHIFT,
+ GFP_KERNEL);
+ if (!shared_buffer) {
+ ret = -ENOMEM;
+diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
+index b43e8680eee8d..48357c403867f 100644
+--- a/drivers/vhost/vdpa.c
++++ b/drivers/vhost/vdpa.c
+@@ -1498,7 +1498,6 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
+
+ err:
+ put_device(&v->dev);
+- ida_simple_remove(&vhost_vdpa_ida, v->minor);
+ return r;
+ }
+
+diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
+index fd3cfdda49491..76527324b63c1 100644
+--- a/drivers/watchdog/sbsa_gwdt.c
++++ b/drivers/watchdog/sbsa_gwdt.c
+@@ -153,14 +153,14 @@ static int sbsa_gwdt_set_timeout(struct watchdog_device *wdd,
+ timeout = clamp_t(unsigned int, timeout, 1, wdd->max_hw_heartbeat_ms / 1000);
+
+ if (action)
+- sbsa_gwdt_reg_write(gwdt->clk * timeout, gwdt);
++ sbsa_gwdt_reg_write((u64)gwdt->clk * timeout, gwdt);
+ else
+ /*
+ * In the single stage mode, The first signal (WS0) is ignored,
+ * the timeout is (WOR * 2), so the WOR should be configured
+ * to half value of timeout.
+ */
+- sbsa_gwdt_reg_write(gwdt->clk / 2 * timeout, gwdt);
++ sbsa_gwdt_reg_write(((u64)gwdt->clk / 2) * timeout, gwdt);
+
+ return 0;
+ }
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
+index c803714d0f0d1..87482b3428bf6 100644
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -164,6 +164,8 @@ static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
+
+ /* IRQ <-> IPI mapping */
+ static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
++/* Cache for IPI event channels - needed for hot cpu unplug (avoid RCU usage). */
++static DEFINE_PER_CPU(evtchn_port_t [XEN_NR_IPIS], ipi_to_evtchn) = {[0 ... XEN_NR_IPIS-1] = 0};
+
+ /* Event channel distribution data */
+ static atomic_t channels_on_cpu[NR_CPUS];
+@@ -366,6 +368,7 @@ static int xen_irq_info_ipi_setup(unsigned cpu,
+ info->u.ipi = ipi;
+
+ per_cpu(ipi_to_irq, cpu)[ipi] = irq;
++ per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
+
+ return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0);
+ }
+@@ -601,7 +604,9 @@ static void lateeoi_list_add(struct irq_info *info)
+
+ spin_lock_irqsave(&eoi->eoi_list_lock, flags);
+
+- if (list_empty(&eoi->eoi_list)) {
++ elem = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
++ eoi_list);
++ if (!elem || info->eoi_time < elem->eoi_time) {
+ list_add(&info->eoi_list, &eoi->eoi_list);
+ mod_delayed_work_on(info->eoi_cpu, system_wq,
+ &eoi->delayed, delay);
+@@ -981,6 +986,7 @@ static void __unbind_from_irq(unsigned int irq)
+ break;
+ case IRQT_IPI:
+ per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
++ per_cpu(ipi_to_evtchn, cpu)[ipi_from_irq(irq)] = 0;
+ break;
+ case IRQT_EVTCHN:
+ dev = info->u.interdomain;
+@@ -1631,7 +1637,7 @@ EXPORT_SYMBOL_GPL(evtchn_put);
+
+ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
+ {
+- int irq;
++ evtchn_port_t evtchn;
+
+ #ifdef CONFIG_X86
+ if (unlikely(vector == XEN_NMI_VECTOR)) {
+@@ -1642,9 +1648,9 @@ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
+ return;
+ }
+ #endif
+- irq = per_cpu(ipi_to_irq, cpu)[vector];
+- BUG_ON(irq < 0);
+- notify_remote_via_irq(irq);
++ evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
++ BUG_ON(evtchn == 0);
++ notify_remote_via_evtchn(evtchn);
+ }
+
+ struct evtchn_loop_ctrl {
+diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
+index e00cf8109b3f3..3c4572ef3a488 100644
+--- a/fs/9p/xattr.c
++++ b/fs/9p/xattr.c
+@@ -68,7 +68,7 @@ ssize_t v9fs_xattr_get(struct dentry *dentry, const char *name,
+ struct p9_fid *fid;
+ int ret;
+
+- p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu\n",
++ p9_debug(P9_DEBUG_VFS, "name = '%s' value_len = %zu\n",
+ name, buffer_size);
+ fid = v9fs_fid_lookup(dentry);
+ if (IS_ERR(fid))
+@@ -139,7 +139,8 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
+
+ ssize_t v9fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
+ {
+- return v9fs_xattr_get(dentry, NULL, buffer, buffer_size);
++ /* Txattrwalk with an empty string lists xattrs instead */
++ return v9fs_xattr_get(dentry, "", buffer, buffer_size);
+ }
+
+ static int v9fs_xattr_handler_get(const struct xattr_handler *handler,
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 5e7a19fca79c4..bf65f801d8439 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -2587,7 +2587,7 @@ static int insert_dev_extent(struct btrfs_trans_handle *trans,
+ btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
+
+ btrfs_set_dev_extent_length(leaf, extent, num_bytes);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ out:
+ btrfs_free_path(path);
+ return ret;
+@@ -3011,7 +3011,7 @@ static int update_block_group_item(struct btrfs_trans_handle *trans,
+ cache->global_root_id);
+ btrfs_set_stack_block_group_flags(&bgi, cache->flags);
+ write_extent_buffer(leaf, &bgi, bi, sizeof(bgi));
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ fail:
+ btrfs_release_path(path);
+ /*
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index 617d4827eec26..118ad4d2cbbe2 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -359,7 +359,7 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
+ return ret;
+ }
+
+- btrfs_mark_buffer_dirty(cow);
++ btrfs_mark_buffer_dirty(trans, cow);
+ *cow_ret = cow;
+ return 0;
+ }
+@@ -627,7 +627,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
+ cow->start);
+ btrfs_set_node_ptr_generation(parent, parent_slot,
+ trans->transid);
+- btrfs_mark_buffer_dirty(parent);
++ btrfs_mark_buffer_dirty(trans, parent);
+ if (last_ref) {
+ ret = btrfs_tree_mod_log_free_eb(buf);
+ if (ret) {
+@@ -643,7 +643,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
+ if (unlock_orig)
+ btrfs_tree_unlock(buf);
+ free_extent_buffer_stale(buf);
+- btrfs_mark_buffer_dirty(cow);
++ btrfs_mark_buffer_dirty(trans, cow);
+ *cow_ret = cow;
+ return 0;
+ }
+@@ -1197,7 +1197,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
+ goto out;
+ }
+ btrfs_set_node_key(parent, &right_key, pslot + 1);
+- btrfs_mark_buffer_dirty(parent);
++ btrfs_mark_buffer_dirty(trans, parent);
+ }
+ }
+ if (btrfs_header_nritems(mid) == 1) {
+@@ -1255,7 +1255,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
+ goto out;
+ }
+ btrfs_set_node_key(parent, &mid_key, pslot);
+- btrfs_mark_buffer_dirty(parent);
++ btrfs_mark_buffer_dirty(trans, parent);
+ }
+
+ /* update the path */
+@@ -1362,7 +1362,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
+ return ret;
+ }
+ btrfs_set_node_key(parent, &disk_key, pslot);
+- btrfs_mark_buffer_dirty(parent);
++ btrfs_mark_buffer_dirty(trans, parent);
+ if (btrfs_header_nritems(left) > orig_slot) {
+ path->nodes[level] = left;
+ path->slots[level + 1] -= 1;
+@@ -1422,7 +1422,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
+ return ret;
+ }
+ btrfs_set_node_key(parent, &disk_key, pslot + 1);
+- btrfs_mark_buffer_dirty(parent);
++ btrfs_mark_buffer_dirty(trans, parent);
+
+ if (btrfs_header_nritems(mid) <= orig_slot) {
+ path->nodes[level] = right;
+@@ -2678,7 +2678,8 @@ int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key,
+ * higher levels
+ *
+ */
+-static void fixup_low_keys(struct btrfs_path *path,
++static void fixup_low_keys(struct btrfs_trans_handle *trans,
++ struct btrfs_path *path,
+ struct btrfs_disk_key *key, int level)
+ {
+ int i;
+@@ -2695,7 +2696,7 @@ static void fixup_low_keys(struct btrfs_path *path,
+ BTRFS_MOD_LOG_KEY_REPLACE);
+ BUG_ON(ret < 0);
+ btrfs_set_node_key(t, key, tslot);
+- btrfs_mark_buffer_dirty(path->nodes[i]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[i]);
+ if (tslot != 0)
+ break;
+ }
+@@ -2707,10 +2708,11 @@ static void fixup_low_keys(struct btrfs_path *path,
+ * This function isn't completely safe. It's the caller's responsibility
+ * that the new key won't break the order
+ */
+-void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
++void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
+ const struct btrfs_key *new_key)
+ {
++ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_disk_key disk_key;
+ struct extent_buffer *eb;
+ int slot;
+@@ -2748,9 +2750,9 @@ void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
+
+ btrfs_cpu_key_to_disk(&disk_key, new_key);
+ btrfs_set_item_key(eb, &disk_key, slot);
+- btrfs_mark_buffer_dirty(eb);
++ btrfs_mark_buffer_dirty(trans, eb);
+ if (slot == 0)
+- fixup_low_keys(path, &disk_key, 1);
++ fixup_low_keys(trans, path, &disk_key, 1);
+ }
+
+ /*
+@@ -2881,8 +2883,8 @@ static int push_node_left(struct btrfs_trans_handle *trans,
+ }
+ btrfs_set_header_nritems(src, src_nritems - push_items);
+ btrfs_set_header_nritems(dst, dst_nritems + push_items);
+- btrfs_mark_buffer_dirty(src);
+- btrfs_mark_buffer_dirty(dst);
++ btrfs_mark_buffer_dirty(trans, src);
++ btrfs_mark_buffer_dirty(trans, dst);
+
+ return ret;
+ }
+@@ -2957,8 +2959,8 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
+ btrfs_set_header_nritems(src, src_nritems - push_items);
+ btrfs_set_header_nritems(dst, dst_nritems + push_items);
+
+- btrfs_mark_buffer_dirty(src);
+- btrfs_mark_buffer_dirty(dst);
++ btrfs_mark_buffer_dirty(trans, src);
++ btrfs_mark_buffer_dirty(trans, dst);
+
+ return ret;
+ }
+@@ -3007,7 +3009,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
+
+ btrfs_set_node_ptr_generation(c, 0, lower_gen);
+
+- btrfs_mark_buffer_dirty(c);
++ btrfs_mark_buffer_dirty(trans, c);
+
+ old = root->node;
+ ret = btrfs_tree_mod_log_insert_root(root->node, c, false);
+@@ -3079,7 +3081,7 @@ static int insert_ptr(struct btrfs_trans_handle *trans,
+ WARN_ON(trans->transid == 0);
+ btrfs_set_node_ptr_generation(lower, slot, trans->transid);
+ btrfs_set_header_nritems(lower, nritems + 1);
+- btrfs_mark_buffer_dirty(lower);
++ btrfs_mark_buffer_dirty(trans, lower);
+
+ return 0;
+ }
+@@ -3158,8 +3160,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
+ btrfs_set_header_nritems(split, c_nritems - mid);
+ btrfs_set_header_nritems(c, mid);
+
+- btrfs_mark_buffer_dirty(c);
+- btrfs_mark_buffer_dirty(split);
++ btrfs_mark_buffer_dirty(trans, c);
++ btrfs_mark_buffer_dirty(trans, split);
+
+ ret = insert_ptr(trans, path, &disk_key, split->start,
+ path->slots[level + 1] + 1, level + 1);
+@@ -3325,15 +3327,15 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
+ btrfs_set_header_nritems(left, left_nritems);
+
+ if (left_nritems)
+- btrfs_mark_buffer_dirty(left);
++ btrfs_mark_buffer_dirty(trans, left);
+ else
+ btrfs_clear_buffer_dirty(trans, left);
+
+- btrfs_mark_buffer_dirty(right);
++ btrfs_mark_buffer_dirty(trans, right);
+
+ btrfs_item_key(right, &disk_key, 0);
+ btrfs_set_node_key(upper, &disk_key, slot + 1);
+- btrfs_mark_buffer_dirty(upper);
++ btrfs_mark_buffer_dirty(trans, upper);
+
+ /* then fixup the leaf pointer in the path */
+ if (path->slots[0] >= left_nritems) {
+@@ -3545,14 +3547,14 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
+ btrfs_set_token_item_offset(&token, i, push_space);
+ }
+
+- btrfs_mark_buffer_dirty(left);
++ btrfs_mark_buffer_dirty(trans, left);
+ if (right_nritems)
+- btrfs_mark_buffer_dirty(right);
++ btrfs_mark_buffer_dirty(trans, right);
+ else
+ btrfs_clear_buffer_dirty(trans, right);
+
+ btrfs_item_key(right, &disk_key, 0);
+- fixup_low_keys(path, &disk_key, 1);
++ fixup_low_keys(trans, path, &disk_key, 1);
+
+ /* then fixup the leaf pointer in the path */
+ if (path->slots[0] < push_items) {
+@@ -3683,8 +3685,8 @@ static noinline int copy_for_split(struct btrfs_trans_handle *trans,
+ if (ret < 0)
+ return ret;
+
+- btrfs_mark_buffer_dirty(right);
+- btrfs_mark_buffer_dirty(l);
++ btrfs_mark_buffer_dirty(trans, right);
++ btrfs_mark_buffer_dirty(trans, l);
+ BUG_ON(path->slots[0] != slot);
+
+ if (mid <= slot) {
+@@ -3925,7 +3927,7 @@ again:
+ path->nodes[0] = right;
+ path->slots[0] = 0;
+ if (path->slots[1] == 0)
+- fixup_low_keys(path, &disk_key, 1);
++ fixup_low_keys(trans, path, &disk_key, 1);
+ }
+ /*
+ * We create a new leaf 'right' for the required ins_len and
+@@ -4024,7 +4026,8 @@ err:
+ return ret;
+ }
+
+-static noinline int split_item(struct btrfs_path *path,
++static noinline int split_item(struct btrfs_trans_handle *trans,
++ struct btrfs_path *path,
+ const struct btrfs_key *new_key,
+ unsigned long split_offset)
+ {
+@@ -4083,7 +4086,7 @@ static noinline int split_item(struct btrfs_path *path,
+ write_extent_buffer(leaf, buf + split_offset,
+ btrfs_item_ptr_offset(leaf, slot),
+ item_size - split_offset);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ BUG_ON(btrfs_leaf_free_space(leaf) < 0);
+ kfree(buf);
+@@ -4117,7 +4120,7 @@ int btrfs_split_item(struct btrfs_trans_handle *trans,
+ if (ret)
+ return ret;
+
+- ret = split_item(path, new_key, split_offset);
++ ret = split_item(trans, path, new_key, split_offset);
+ return ret;
+ }
+
+@@ -4127,7 +4130,8 @@ int btrfs_split_item(struct btrfs_trans_handle *trans,
+ * off the end of the item or if we shift the item to chop bytes off
+ * the front.
+ */
+-void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
++void btrfs_truncate_item(struct btrfs_trans_handle *trans,
++ struct btrfs_path *path, u32 new_size, int from_end)
+ {
+ int slot;
+ struct extent_buffer *leaf;
+@@ -4203,11 +4207,11 @@ void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
+ btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
+ btrfs_set_item_key(leaf, &disk_key, slot);
+ if (slot == 0)
+- fixup_low_keys(path, &disk_key, 1);
++ fixup_low_keys(trans, path, &disk_key, 1);
+ }
+
+ btrfs_set_item_size(leaf, slot, new_size);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ if (btrfs_leaf_free_space(leaf) < 0) {
+ btrfs_print_leaf(leaf);
+@@ -4218,7 +4222,8 @@ void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
+ /*
+ * make the item pointed to by the path bigger, data_size is the added size.
+ */
+-void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
++void btrfs_extend_item(struct btrfs_trans_handle *trans,
++ struct btrfs_path *path, u32 data_size)
+ {
+ int slot;
+ struct extent_buffer *leaf;
+@@ -4268,7 +4273,7 @@ void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
+ data_end = old_data;
+ old_size = btrfs_item_size(leaf, slot);
+ btrfs_set_item_size(leaf, slot, old_size + data_size);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ if (btrfs_leaf_free_space(leaf) < 0) {
+ btrfs_print_leaf(leaf);
+@@ -4279,6 +4284,7 @@ void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
+ /*
+ * Make space in the node before inserting one or more items.
+ *
++ * @trans: transaction handle
+ * @root: root we are inserting items to
+ * @path: points to the leaf/slot where we are going to insert new items
+ * @batch: information about the batch of items to insert
+@@ -4286,7 +4292,8 @@ void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
+ * Main purpose is to save stack depth by doing the bulk of the work in a
+ * function that doesn't call btrfs_search_slot
+ */
+-static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
++static void setup_items_for_insert(struct btrfs_trans_handle *trans,
++ struct btrfs_root *root, struct btrfs_path *path,
+ const struct btrfs_item_batch *batch)
+ {
+ struct btrfs_fs_info *fs_info = root->fs_info;
+@@ -4306,7 +4313,7 @@ static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *p
+ */
+ if (path->slots[0] == 0) {
+ btrfs_cpu_key_to_disk(&disk_key, &batch->keys[0]);
+- fixup_low_keys(path, &disk_key, 1);
++ fixup_low_keys(trans, path, &disk_key, 1);
+ }
+ btrfs_unlock_up_safe(path, 1);
+
+@@ -4365,7 +4372,7 @@ static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *p
+ }
+
+ btrfs_set_header_nritems(leaf, nritems + batch->nr);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ if (btrfs_leaf_free_space(leaf) < 0) {
+ btrfs_print_leaf(leaf);
+@@ -4376,12 +4383,14 @@ static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *p
+ /*
+ * Insert a new item into a leaf.
+ *
++ * @trans: Transaction handle.
+ * @root: The root of the btree.
+ * @path: A path pointing to the target leaf and slot.
+ * @key: The key of the new item.
+ * @data_size: The size of the data associated with the new key.
+ */
+-void btrfs_setup_item_for_insert(struct btrfs_root *root,
++void btrfs_setup_item_for_insert(struct btrfs_trans_handle *trans,
++ struct btrfs_root *root,
+ struct btrfs_path *path,
+ const struct btrfs_key *key,
+ u32 data_size)
+@@ -4393,7 +4402,7 @@ void btrfs_setup_item_for_insert(struct btrfs_root *root,
+ batch.total_data_size = data_size;
+ batch.nr = 1;
+
+- setup_items_for_insert(root, path, &batch);
++ setup_items_for_insert(trans, root, path, &batch);
+ }
+
+ /*
+@@ -4419,7 +4428,7 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
+ slot = path->slots[0];
+ BUG_ON(slot < 0);
+
+- setup_items_for_insert(root, path, batch);
++ setup_items_for_insert(trans, root, path, batch);
+ return 0;
+ }
+
+@@ -4444,7 +4453,7 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ leaf = path->nodes[0];
+ ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
+ write_extent_buffer(leaf, data, ptr, data_size);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ }
+ btrfs_free_path(path);
+ return ret;
+@@ -4475,7 +4484,7 @@ int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
+ return ret;
+
+ path->slots[0]++;
+- btrfs_setup_item_for_insert(root, path, new_key, item_size);
++ btrfs_setup_item_for_insert(trans, root, path, new_key, item_size);
+ leaf = path->nodes[0];
+ memcpy_extent_buffer(leaf,
+ btrfs_item_ptr_offset(leaf, path->slots[0]),
+@@ -4533,9 +4542,9 @@ int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ struct btrfs_disk_key disk_key;
+
+ btrfs_node_key(parent, &disk_key, 0);
+- fixup_low_keys(path, &disk_key, level + 1);
++ fixup_low_keys(trans, path, &disk_key, level + 1);
+ }
+- btrfs_mark_buffer_dirty(parent);
++ btrfs_mark_buffer_dirty(trans, parent);
+ return 0;
+ }
+
+@@ -4632,7 +4641,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ struct btrfs_disk_key disk_key;
+
+ btrfs_item_key(leaf, &disk_key, 0);
+- fixup_low_keys(path, &disk_key, 1);
++ fixup_low_keys(trans, path, &disk_key, 1);
+ }
+
+ /*
+@@ -4697,11 +4706,11 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ * dirtied this buffer
+ */
+ if (path->nodes[0] == leaf)
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ free_extent_buffer(leaf);
+ }
+ } else {
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ }
+ }
+ return ret;
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index ff40acd63a374..06333a74d6c4c 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -518,7 +518,7 @@ int btrfs_previous_item(struct btrfs_root *root,
+ int type);
+ int btrfs_previous_extent_item(struct btrfs_root *root,
+ struct btrfs_path *path, u64 min_objectid);
+-void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
++void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
+ const struct btrfs_key *new_key);
+ struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
+@@ -545,8 +545,10 @@ int btrfs_block_can_be_shared(struct btrfs_trans_handle *trans,
+ struct extent_buffer *buf);
+ int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ struct btrfs_path *path, int level, int slot);
+-void btrfs_extend_item(struct btrfs_path *path, u32 data_size);
+-void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end);
++void btrfs_extend_item(struct btrfs_trans_handle *trans,
++ struct btrfs_path *path, u32 data_size);
++void btrfs_truncate_item(struct btrfs_trans_handle *trans,
++ struct btrfs_path *path, u32 new_size, int from_end);
+ int btrfs_split_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+@@ -610,7 +612,8 @@ struct btrfs_item_batch {
+ int nr;
+ };
+
+-void btrfs_setup_item_for_insert(struct btrfs_root *root,
++void btrfs_setup_item_for_insert(struct btrfs_trans_handle *trans,
++ struct btrfs_root *root,
+ struct btrfs_path *path,
+ const struct btrfs_key *key,
+ u32 data_size);
+diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c
+index 427abaf608b8c..0d105ed1b8def 100644
+--- a/fs/btrfs/delalloc-space.c
++++ b/fs/btrfs/delalloc-space.c
+@@ -322,9 +322,6 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes,
+ } else {
+ if (current->journal_info)
+ flush = BTRFS_RESERVE_FLUSH_LIMIT;
+-
+- if (btrfs_transaction_in_commit(fs_info))
+- schedule_timeout(1);
+ }
+
+ num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index 142e0a0f6a9fe..5d3229b42b3e2 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -1030,7 +1030,7 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
+ struct btrfs_inode_item);
+ write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
+ sizeof(struct btrfs_inode_item));
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
+ goto out;
+diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
+index 5f10965fd72bf..5549cbd9bdf6a 100644
+--- a/fs/btrfs/dev-replace.c
++++ b/fs/btrfs/dev-replace.c
+@@ -442,7 +442,7 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans)
+ dev_replace->item_needs_writeback = 0;
+ up_write(&dev_replace->rwsem);
+
+- btrfs_mark_buffer_dirty(eb);
++ btrfs_mark_buffer_dirty(trans, eb);
+
+ out:
+ btrfs_free_path(path);
+diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
+index 082eb0e195981..9c07d5c3e5ad2 100644
+--- a/fs/btrfs/dir-item.c
++++ b/fs/btrfs/dir-item.c
+@@ -38,7 +38,7 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
+ di = btrfs_match_dir_item_name(fs_info, path, name, name_len);
+ if (di)
+ return ERR_PTR(-EEXIST);
+- btrfs_extend_item(path, data_size);
++ btrfs_extend_item(trans, path, data_size);
+ } else if (ret < 0)
+ return ERR_PTR(ret);
+ WARN_ON(ret > 0);
+@@ -93,7 +93,7 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
+
+ write_extent_buffer(leaf, name, name_ptr, name_len);
+ write_extent_buffer(leaf, data, data_ptr, data_len);
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+
+ return ret;
+ }
+@@ -153,7 +153,7 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
+ name_ptr = (unsigned long)(dir_item + 1);
+
+ write_extent_buffer(leaf, name->name, name_ptr, name->len);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ second_insert:
+ /* FIXME, use some real flag for selecting the extra index */
+@@ -439,7 +439,7 @@ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
+ start = btrfs_item_ptr_offset(leaf, path->slots[0]);
+ memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
+ item_len - (ptr + sub_item_len - start));
+- btrfs_truncate_item(path, item_len - sub_item_len, 1);
++ btrfs_truncate_item(trans, path, item_len - sub_item_len, 1);
+ }
+ return ret;
+ }
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 681594df7334f..1ae781f533582 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -872,7 +872,7 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
+ }
+
+ root->node = leaf;
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ root->commit_root = btrfs_root_node(root);
+ set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
+@@ -947,7 +947,7 @@ int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans,
+
+ root->node = leaf;
+
+- btrfs_mark_buffer_dirty(root->node);
++ btrfs_mark_buffer_dirty(trans, root->node);
+ btrfs_tree_unlock(root->node);
+
+ return 0;
+@@ -4426,7 +4426,8 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
+ btrfs_close_devices(fs_info->fs_devices);
+ }
+
+-void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
++void btrfs_mark_buffer_dirty(struct btrfs_trans_handle *trans,
++ struct extent_buffer *buf)
+ {
+ struct btrfs_fs_info *fs_info = buf->fs_info;
+ u64 transid = btrfs_header_generation(buf);
+@@ -4440,10 +4441,14 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
+ if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
+ return;
+ #endif
++ /* This is an active transaction (its state < TRANS_STATE_UNBLOCKED). */
++ ASSERT(trans->transid == fs_info->generation);
+ btrfs_assert_tree_write_locked(buf);
+- if (transid != fs_info->generation)
++ if (transid != fs_info->generation) {
+ WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
+ buf->start, transid, fs_info->generation);
++ btrfs_abort_transaction(trans, -EUCLEAN);
++ }
+ set_extent_buffer_dirty(buf);
+ #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
+ /*
+diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
+index b03767f4d7edf..e5bdb96912438 100644
+--- a/fs/btrfs/disk-io.h
++++ b/fs/btrfs/disk-io.h
+@@ -105,7 +105,8 @@ static inline struct btrfs_root *btrfs_grab_root(struct btrfs_root *root)
+ }
+
+ void btrfs_put_root(struct btrfs_root *root);
+-void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
++void btrfs_mark_buffer_dirty(struct btrfs_trans_handle *trans,
++ struct extent_buffer *buf);
+ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
+ int atomic);
+ int btrfs_read_extent_buffer(struct extent_buffer *buf,
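
The disk-io hunks above are the core of this series' btrfs change: btrfs_mark_buffer_dirty() now takes the transaction handle, asserts that the handle belongs to the currently running transaction, and aborts the transaction on a generation mismatch instead of only WARNing. Every conversion in the btrfs files that follow applies one mechanical pattern; a minimal sketch (the function name is invented for illustration, the calls are from the patch):

    static int example_update_leaf(struct btrfs_trans_handle *trans,
                                   struct btrfs_path *path)
    {
            struct extent_buffer *leaf = path->nodes[0];

            /* ... modify items in 'leaf' while holding its write lock ... */

            /* Old call: btrfs_mark_buffer_dirty(leaf); */
            btrfs_mark_buffer_dirty(trans, leaf);
            return 0;
    }
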
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 14ea6b587e97b..118c56c512bd8 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -596,7 +596,7 @@ static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
+ btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
+ }
+ }
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ ret = 0;
+ fail:
+ btrfs_release_path(path);
+@@ -644,7 +644,7 @@ static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
+ btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
+ else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
+ btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ }
+ return ret;
+ }
+@@ -997,7 +997,7 @@ out:
+ * helper to add new inline back ref
+ */
+ static noinline_for_stack
+-void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
++void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
+ struct btrfs_extent_inline_ref *iref,
+ u64 parent, u64 root_objectid,
+@@ -1020,7 +1020,7 @@ void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
+ type = extent_ref_type(parent, owner);
+ size = btrfs_extent_inline_ref_size(type);
+
+- btrfs_extend_item(path, size);
++ btrfs_extend_item(trans, path, size);
+
+ ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
+ refs = btrfs_extent_refs(leaf, ei);
+@@ -1054,7 +1054,7 @@ void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
+ } else {
+ btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
+ }
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ }
+
+ static int lookup_extent_backref(struct btrfs_trans_handle *trans,
+@@ -1087,7 +1087,9 @@ static int lookup_extent_backref(struct btrfs_trans_handle *trans,
+ /*
+ * helper to update/remove inline back ref
+ */
+-static noinline_for_stack int update_inline_extent_backref(struct btrfs_path *path,
++static noinline_for_stack int update_inline_extent_backref(
++ struct btrfs_trans_handle *trans,
++ struct btrfs_path *path,
+ struct btrfs_extent_inline_ref *iref,
+ int refs_to_mod,
+ struct btrfs_delayed_extent_op *extent_op)
+@@ -1195,9 +1197,9 @@ static noinline_for_stack int update_inline_extent_backref(struct btrfs_path *pa
+ memmove_extent_buffer(leaf, ptr, ptr + size,
+ end - ptr - size);
+ item_size -= size;
+- btrfs_truncate_item(path, item_size, 1);
++ btrfs_truncate_item(trans, path, item_size, 1);
+ }
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ return 0;
+ }
+
+@@ -1227,9 +1229,10 @@ int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
+ bytenr, num_bytes, root_objectid, path->slots[0]);
+ return -EUCLEAN;
+ }
+- ret = update_inline_extent_backref(path, iref, refs_to_add, extent_op);
++ ret = update_inline_extent_backref(trans, path, iref,
++ refs_to_add, extent_op);
+ } else if (ret == -ENOENT) {
+- setup_inline_extent_backref(trans->fs_info, path, iref, parent,
++ setup_inline_extent_backref(trans, path, iref, parent,
+ root_objectid, owner, offset,
+ refs_to_add, extent_op);
+ ret = 0;
+@@ -1247,7 +1250,8 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans,
+
+ BUG_ON(!is_data && refs_to_drop != 1);
+ if (iref)
+- ret = update_inline_extent_backref(path, iref, -refs_to_drop, NULL);
++ ret = update_inline_extent_backref(trans, path, iref,
++ -refs_to_drop, NULL);
+ else if (is_data)
+ ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
+ else
+@@ -1531,7 +1535,7 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
+ if (extent_op)
+ __run_delayed_extent_op(extent_op, leaf, item);
+
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_release_path(path);
+
+ /* now insert the actual backref */
+@@ -1697,7 +1701,7 @@ again:
+ ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
+ __run_delayed_extent_op(extent_op, leaf, ei);
+
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ out:
+ btrfs_free_path(path);
+ return err;
+@@ -3171,7 +3175,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
+ }
+ } else {
+ btrfs_set_extent_refs(leaf, ei, refs);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ }
+ if (found_extent) {
+ ret = remove_extent_backref(trans, extent_root, path,
+@@ -4679,7 +4683,7 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
+ btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
+ }
+
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ btrfs_free_path(path);
+
+ return alloc_reserved_extent(trans, ins->objectid, ins->offset);
+@@ -4754,7 +4758,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
+ btrfs_set_extent_inline_ref_offset(leaf, iref, ref->root);
+ }
+
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_free_path(path);
+
+ return alloc_reserved_extent(trans, node->bytenr, fs_info->nodesize);
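
Note that setup_inline_extent_backref() above swaps its fs_info parameter for the transaction handle without losing anything, since a handle always carries the fs_info it was started against; a one-line sketch of how converted helpers recover it:

    /* Any helper that now receives only 'trans' can still reach fs_info. */
    struct btrfs_fs_info *fs_info = trans->fs_info;
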
+diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
+index 1ce5dd1544995..45cae356e89ba 100644
+--- a/fs/btrfs/file-item.c
++++ b/fs/btrfs/file-item.c
+@@ -194,7 +194,7 @@ int btrfs_insert_hole_extent(struct btrfs_trans_handle *trans,
+ btrfs_set_file_extent_encryption(leaf, item, 0);
+ btrfs_set_file_extent_other_encoding(leaf, item, 0);
+
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ out:
+ btrfs_free_path(path);
+ return ret;
+@@ -811,11 +811,12 @@ blk_status_t btrfs_alloc_dummy_sum(struct btrfs_bio *bbio)
+ * This calls btrfs_truncate_item with the correct args based on the overlap,
+ * and fixes up the key as required.
+ */
+-static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
++static noinline void truncate_one_csum(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
+ struct btrfs_key *key,
+ u64 bytenr, u64 len)
+ {
++ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct extent_buffer *leaf;
+ const u32 csum_size = fs_info->csum_size;
+ u64 csum_end;
+@@ -836,7 +837,7 @@ static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
+ */
+ u32 new_size = (bytenr - key->offset) >> blocksize_bits;
+ new_size *= csum_size;
+- btrfs_truncate_item(path, new_size, 1);
++ btrfs_truncate_item(trans, path, new_size, 1);
+ } else if (key->offset >= bytenr && csum_end > end_byte &&
+ end_byte > key->offset) {
+ /*
+@@ -848,10 +849,10 @@ static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
+ u32 new_size = (csum_end - end_byte) >> blocksize_bits;
+ new_size *= csum_size;
+
+- btrfs_truncate_item(path, new_size, 0);
++ btrfs_truncate_item(trans, path, new_size, 0);
+
+ key->offset = end_byte;
+- btrfs_set_item_key_safe(fs_info, path, key);
++ btrfs_set_item_key_safe(trans, path, key);
+ } else {
+ BUG();
+ }
+@@ -994,7 +995,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
+
+ key.offset = end_byte - 1;
+ } else {
+- truncate_one_csum(fs_info, path, &key, bytenr, len);
++ truncate_one_csum(trans, path, &key, bytenr, len);
+ if (key.offset < bytenr)
+ break;
+ }
+@@ -1202,7 +1203,7 @@ extend_csum:
+ diff /= csum_size;
+ diff *= csum_size;
+
+- btrfs_extend_item(path, diff);
++ btrfs_extend_item(trans, path, diff);
+ ret = 0;
+ goto csum;
+ }
+@@ -1249,7 +1250,7 @@ found:
+ ins_size /= csum_size;
+ total_bytes += ins_size * fs_info->sectorsize;
+
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ if (total_bytes < sums->len) {
+ btrfs_release_path(path);
+ cond_resched();
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index eae9175f2c29b..a407af38a9237 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -368,7 +368,7 @@ next_slot:
+ btrfs_set_file_extent_offset(leaf, fi, extent_offset);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ extent_end - args->start);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ if (update_refs && disk_bytenr > 0) {
+ btrfs_init_generic_ref(&ref,
+@@ -405,13 +405,13 @@ next_slot:
+
+ memcpy(&new_key, &key, sizeof(new_key));
+ new_key.offset = args->end;
+- btrfs_set_item_key_safe(fs_info, path, &new_key);
++ btrfs_set_item_key_safe(trans, path, &new_key);
+
+ extent_offset += args->end - key.offset;
+ btrfs_set_file_extent_offset(leaf, fi, extent_offset);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ extent_end - args->end);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ if (update_refs && disk_bytenr > 0)
+ args->bytes_found += args->end - key.offset;
+ break;
+@@ -431,7 +431,7 @@ next_slot:
+
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ args->start - key.offset);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ if (update_refs && disk_bytenr > 0)
+ args->bytes_found += extent_end - args->start;
+ if (args->end == extent_end)
+@@ -536,7 +536,8 @@ delete_extent_item:
+ if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
+ path->slots[0]++;
+ }
+- btrfs_setup_item_for_insert(root, path, &key, args->extent_item_size);
++ btrfs_setup_item_for_insert(trans, root, path, &key,
++ args->extent_item_size);
+ args->extent_inserted = true;
+ }
+
+@@ -593,7 +594,6 @@ static int extent_mergeable(struct extent_buffer *leaf, int slot,
+ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
+ struct btrfs_inode *inode, u64 start, u64 end)
+ {
+- struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_root *root = inode->root;
+ struct extent_buffer *leaf;
+ struct btrfs_path *path;
+@@ -664,7 +664,7 @@ again:
+ ino, bytenr, orig_offset,
+ &other_start, &other_end)) {
+ new_key.offset = end;
+- btrfs_set_item_key_safe(fs_info, path, &new_key);
++ btrfs_set_item_key_safe(trans, path, &new_key);
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ btrfs_set_file_extent_generation(leaf, fi,
+@@ -679,7 +679,7 @@ again:
+ trans->transid);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ end - other_start);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ goto out;
+ }
+ }
+@@ -698,7 +698,7 @@ again:
+ trans->transid);
+ path->slots[0]++;
+ new_key.offset = start;
+- btrfs_set_item_key_safe(fs_info, path, &new_key);
++ btrfs_set_item_key_safe(trans, path, &new_key);
+
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+@@ -708,7 +708,7 @@ again:
+ other_end - start);
+ btrfs_set_file_extent_offset(leaf, fi,
+ start - orig_offset);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ goto out;
+ }
+ }
+@@ -742,7 +742,7 @@ again:
+ btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ extent_end - split);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
+ num_bytes, 0);
+@@ -814,7 +814,7 @@ again:
+ btrfs_set_file_extent_type(leaf, fi,
+ BTRFS_FILE_EXTENT_REG);
+ btrfs_set_file_extent_generation(leaf, fi, trans->transid);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ } else {
+ fi = btrfs_item_ptr(leaf, del_slot - 1,
+ struct btrfs_file_extent_item);
+@@ -823,7 +823,7 @@ again:
+ btrfs_set_file_extent_generation(leaf, fi, trans->transid);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ extent_end - key.offset);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
+ if (ret < 0) {
+@@ -2103,7 +2103,7 @@ static int fill_holes(struct btrfs_trans_handle *trans,
+ btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
+ btrfs_set_file_extent_offset(leaf, fi, 0);
+ btrfs_set_file_extent_generation(leaf, fi, trans->transid);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ goto out;
+ }
+
+@@ -2111,7 +2111,7 @@ static int fill_holes(struct btrfs_trans_handle *trans,
+ u64 num_bytes;
+
+ key.offset = offset;
+- btrfs_set_item_key_safe(fs_info, path, &key);
++ btrfs_set_item_key_safe(trans, path, &key);
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
+@@ -2120,7 +2120,7 @@ static int fill_holes(struct btrfs_trans_handle *trans,
+ btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
+ btrfs_set_file_extent_offset(leaf, fi, 0);
+ btrfs_set_file_extent_generation(leaf, fi, trans->transid);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ goto out;
+ }
+ btrfs_release_path(path);
+@@ -2272,7 +2272,7 @@ static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
+ btrfs_set_file_extent_num_bytes(leaf, extent, replace_len);
+ if (extent_info->is_new_extent)
+ btrfs_set_file_extent_generation(leaf, extent, trans->transid);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_release_path(path);
+
+ ret = btrfs_inode_set_file_extent_range(inode, extent_info->file_offset,
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index 8808004180759..6b7383ae5a70c 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -195,7 +195,7 @@ static int __create_free_space_inode(struct btrfs_root *root,
+ btrfs_set_inode_nlink(leaf, inode_item, 1);
+ btrfs_set_inode_transid(leaf, inode_item, trans->transid);
+ btrfs_set_inode_block_group(leaf, inode_item, offset);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_release_path(path);
+
+ key.objectid = BTRFS_FREE_SPACE_OBJECTID;
+@@ -213,7 +213,7 @@ static int __create_free_space_inode(struct btrfs_root *root,
+ struct btrfs_free_space_header);
+ memzero_extent_buffer(leaf, (unsigned long)header, sizeof(*header));
+ btrfs_set_free_space_key(leaf, header, &disk_key);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_release_path(path);
+
+ return 0;
+@@ -1185,7 +1185,7 @@ update_cache_item(struct btrfs_trans_handle *trans,
+ btrfs_set_free_space_entries(leaf, header, entries);
+ btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
+ btrfs_set_free_space_generation(leaf, header, trans->transid);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_release_path(path);
+
+ return 0;
+diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
+index f169378e2ca6e..ae060a26e1191 100644
+--- a/fs/btrfs/free-space-tree.c
++++ b/fs/btrfs/free-space-tree.c
+@@ -89,7 +89,7 @@ static int add_new_free_space_info(struct btrfs_trans_handle *trans,
+ struct btrfs_free_space_info);
+ btrfs_set_free_space_extent_count(leaf, info, 0);
+ btrfs_set_free_space_flags(leaf, info, 0);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ ret = 0;
+ out:
+@@ -287,7 +287,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
+ flags |= BTRFS_FREE_SPACE_USING_BITMAPS;
+ btrfs_set_free_space_flags(leaf, info, flags);
+ expected_extent_count = btrfs_free_space_extent_count(leaf, info);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_release_path(path);
+
+ if (extent_count != expected_extent_count) {
+@@ -324,7 +324,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
+ ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
+ write_extent_buffer(leaf, bitmap_cursor, ptr,
+ data_size);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_release_path(path);
+
+ i += extent_size;
+@@ -430,7 +430,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
+ flags &= ~BTRFS_FREE_SPACE_USING_BITMAPS;
+ btrfs_set_free_space_flags(leaf, info, flags);
+ expected_extent_count = btrfs_free_space_extent_count(leaf, info);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_release_path(path);
+
+ nrbits = block_group->length >> block_group->fs_info->sectorsize_bits;
+@@ -495,7 +495,7 @@ static int update_free_space_extent_count(struct btrfs_trans_handle *trans,
+
+ extent_count += new_extents;
+ btrfs_set_free_space_extent_count(path->nodes[0], info, extent_count);
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ btrfs_release_path(path);
+
+ if (!(flags & BTRFS_FREE_SPACE_USING_BITMAPS) &&
+@@ -533,7 +533,8 @@ int free_space_test_bit(struct btrfs_block_group *block_group,
+ return !!extent_buffer_test_bit(leaf, ptr, i);
+ }
+
+-static void free_space_set_bits(struct btrfs_block_group *block_group,
++static void free_space_set_bits(struct btrfs_trans_handle *trans,
++ struct btrfs_block_group *block_group,
+ struct btrfs_path *path, u64 *start, u64 *size,
+ int bit)
+ {
+@@ -563,7 +564,7 @@ static void free_space_set_bits(struct btrfs_block_group *block_group,
+ extent_buffer_bitmap_set(leaf, ptr, first, last - first);
+ else
+ extent_buffer_bitmap_clear(leaf, ptr, first, last - first);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ *size -= end - *start;
+ *start = end;
+@@ -656,7 +657,7 @@ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
+ cur_start = start;
+ cur_size = size;
+ while (1) {
+- free_space_set_bits(block_group, path, &cur_start, &cur_size,
++ free_space_set_bits(trans, block_group, path, &cur_start, &cur_size,
+ !remove);
+ if (cur_size == 0)
+ break;
+diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
+index 4c322b720a80a..d3ff97374d48a 100644
+--- a/fs/btrfs/inode-item.c
++++ b/fs/btrfs/inode-item.c
+@@ -167,7 +167,7 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
+ memmove_extent_buffer(leaf, ptr, ptr + del_len,
+ item_size - (ptr + del_len - item_start));
+
+- btrfs_truncate_item(path, item_size - del_len, 1);
++ btrfs_truncate_item(trans, path, item_size - del_len, 1);
+
+ out:
+ btrfs_free_path(path);
+@@ -229,7 +229,7 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
+ item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
+ memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
+ item_size - (ptr + sub_item_len - item_start));
+- btrfs_truncate_item(path, item_size - sub_item_len, 1);
++ btrfs_truncate_item(trans, path, item_size - sub_item_len, 1);
+ out:
+ btrfs_free_path(path);
+
+@@ -282,7 +282,7 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
+ name))
+ goto out;
+
+- btrfs_extend_item(path, ins_len);
++ btrfs_extend_item(trans, path, ins_len);
+ ret = 0;
+ }
+ if (ret < 0)
+@@ -299,7 +299,7 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
+
+ ptr = (unsigned long)&extref->name;
+ write_extent_buffer(path->nodes[0], name->name, ptr, name->len);
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+
+ out:
+ btrfs_free_path(path);
+@@ -338,7 +338,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
+ goto out;
+
+ old_size = btrfs_item_size(path->nodes[0], path->slots[0]);
+- btrfs_extend_item(path, ins_len);
++ btrfs_extend_item(trans, path, ins_len);
+ ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_inode_ref);
+ ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
+@@ -364,7 +364,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
+ ptr = (unsigned long)(ref + 1);
+ }
+ write_extent_buffer(path->nodes[0], name->name, ptr, name->len);
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+
+ out:
+ btrfs_free_path(path);
+@@ -591,7 +591,7 @@ search_again:
+ num_dec = (orig_num_bytes - extent_num_bytes);
+ if (extent_start != 0)
+ control->sub_bytes += num_dec;
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ } else {
+ extent_num_bytes =
+ btrfs_file_extent_disk_num_bytes(leaf, fi);
+@@ -617,7 +617,7 @@ search_again:
+
+ btrfs_set_file_extent_ram_bytes(leaf, fi, size);
+ size = btrfs_file_extent_calc_inline_size(size);
+- btrfs_truncate_item(path, size, 1);
++ btrfs_truncate_item(trans, path, size, 1);
+ } else if (!del_item) {
+ /*
+ * We have to bail so the last_size is set to
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 0f4498dfa30c9..197c1debefed5 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -573,7 +573,7 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
+ kunmap_local(kaddr);
+ put_page(page);
+ }
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_release_path(path);
+
+ /*
+@@ -3072,7 +3072,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
+ btrfs_item_ptr_offset(leaf, path->slots[0]),
+ sizeof(struct btrfs_file_extent_item));
+
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_release_path(path);
+
+ /*
+@@ -4134,7 +4134,7 @@ static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
+ struct btrfs_inode_item);
+
+ fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_set_inode_last_trans(trans, inode);
+ ret = 0;
+ failed:
+@@ -6476,7 +6476,7 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
+ }
+ }
+
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ /*
+ * We don't need the path anymore, plus inheriting properties, adding
+ * ACLs, security xattrs, orphan item or adding the link, will result in
+@@ -7142,8 +7142,15 @@ static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode,
+ int ret;
+
+ alloc_hint = get_extent_allocation_hint(inode, start, len);
++again:
+ ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
+ 0, alloc_hint, &ins, 1, 1);
++ if (ret == -EAGAIN) {
++ ASSERT(btrfs_is_zoned(fs_info));
++ wait_on_bit_io(&inode->root->fs_info->flags, BTRFS_FS_NEED_ZONE_FINISH,
++ TASK_UNINTERRUPTIBLE);
++ goto again;
++ }
+ if (ret)
+ return ERR_PTR(ret);
+
+@@ -9630,7 +9637,7 @@ static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
+
+ ptr = btrfs_file_extent_inline_start(ei);
+ write_extent_buffer(leaf, symname, ptr, name_len);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ btrfs_free_path(path);
+
+ d_instantiate_new(dentry, inode);
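
Aside from the dirty-buffer plumbing, inode.c gains a retry loop: on zoned filesystems btrfs_reserve_extent() may fail with -EAGAIN until some zone is finished. A condensed sketch of the idiom (the hunk reaches fs_info through inode->root->fs_info; the local variable used here is equivalent):

    again:
            ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
                                       0, alloc_hint, &ins, 1, 1);
            if (ret == -EAGAIN) {
                    /* Only zoned filesystems return -EAGAIN here. */
                    ASSERT(btrfs_is_zoned(fs_info));
                    /* Sleep until a zone finish clears the flag bit. */
                    wait_on_bit_io(&fs_info->flags, BTRFS_FS_NEED_ZONE_FINISH,
                                   TASK_UNINTERRUPTIBLE);
                    goto again;
            }
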
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 6d0df9bc1e72b..8bdf9bed25c75 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -663,7 +663,7 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
+ goto out;
+ }
+
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ inode_item = &root_item->inode;
+ btrfs_set_stack_inode_generation(inode_item, 1);
+@@ -2947,7 +2947,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
+
+ btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key);
+ btrfs_set_dir_item_key(path->nodes[0], di, &disk_key);
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ btrfs_release_path(path);
+
+ btrfs_set_fs_incompat(fs_info, DEFAULT_SUBVOL);
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 2637d6b157ff9..74cabaa59be71 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -622,7 +622,7 @@ static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
+
+ ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);
+
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+
+ btrfs_free_path(path);
+ return ret;
+@@ -700,7 +700,7 @@ static int add_qgroup_item(struct btrfs_trans_handle *trans,
+ btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
+ btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);
+
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ btrfs_release_path(path);
+
+@@ -719,7 +719,7 @@ static int add_qgroup_item(struct btrfs_trans_handle *trans,
+ btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
+ btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);
+
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ ret = 0;
+ out:
+@@ -808,7 +808,7 @@ static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
+ btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
+ btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);
+
+- btrfs_mark_buffer_dirty(l);
++ btrfs_mark_buffer_dirty(trans, l);
+
+ out:
+ btrfs_free_path(path);
+@@ -854,7 +854,7 @@ static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
+ btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
+ btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);
+
+- btrfs_mark_buffer_dirty(l);
++ btrfs_mark_buffer_dirty(trans, l);
+
+ out:
+ btrfs_free_path(path);
+@@ -896,7 +896,7 @@ static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
+ btrfs_set_qgroup_status_rescan(l, ptr,
+ fs_info->qgroup_rescan_progress.objectid);
+
+- btrfs_mark_buffer_dirty(l);
++ btrfs_mark_buffer_dirty(trans, l);
+
+ out:
+ btrfs_free_path(path);
+@@ -1069,7 +1069,7 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
+ BTRFS_QGROUP_STATUS_FLAGS_MASK);
+ btrfs_set_qgroup_status_rescan(leaf, ptr, 0);
+
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ key.objectid = 0;
+ key.type = BTRFS_ROOT_REF_KEY;
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index 62ed57551824c..31781af447553 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -1181,7 +1181,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
+ }
+ }
+ if (dirty)
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ if (inode)
+ btrfs_add_delayed_iput(BTRFS_I(inode));
+ return ret;
+@@ -1374,13 +1374,13 @@ again:
+ */
+ btrfs_set_node_blockptr(parent, slot, new_bytenr);
+ btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
+- btrfs_mark_buffer_dirty(parent);
++ btrfs_mark_buffer_dirty(trans, parent);
+
+ btrfs_set_node_blockptr(path->nodes[level],
+ path->slots[level], old_bytenr);
+ btrfs_set_node_ptr_generation(path->nodes[level],
+ path->slots[level], old_ptr_gen);
+- btrfs_mark_buffer_dirty(path->nodes[level]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[level]);
+
+ btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, old_bytenr,
+ blocksize, path->nodes[level]->start);
+@@ -2517,7 +2517,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
+ node->eb->start);
+ btrfs_set_node_ptr_generation(upper->eb, slot,
+ trans->transid);
+- btrfs_mark_buffer_dirty(upper->eb);
++ btrfs_mark_buffer_dirty(trans, upper->eb);
+
+ btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
+ node->eb->start, blocksize,
+@@ -3833,7 +3833,7 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
+ btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
+ btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
+ BTRFS_INODE_PREALLOC);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ out:
+ btrfs_free_path(path);
+ return ret;
+diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
+index 859874579456f..5b0f1bccc409c 100644
+--- a/fs/btrfs/root-tree.c
++++ b/fs/btrfs/root-tree.c
+@@ -191,7 +191,7 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
+ btrfs_set_root_generation_v2(item, btrfs_root_generation(item));
+
+ write_extent_buffer(l, item, ptr, sizeof(*item));
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ out:
+ btrfs_free_path(path);
+ return ret;
+@@ -438,7 +438,7 @@ again:
+ btrfs_set_root_ref_name_len(leaf, ref, name->len);
+ ptr = (unsigned long)(ref + 1);
+ write_extent_buffer(leaf, name->name, ptr, name->len);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ if (key.type == BTRFS_ROOT_BACKREF_KEY) {
+ btrfs_release_path(path);
+diff --git a/fs/btrfs/tests/extent-buffer-tests.c b/fs/btrfs/tests/extent-buffer-tests.c
+index 5ef0b90e25c3b..6a43a64ba55ad 100644
+--- a/fs/btrfs/tests/extent-buffer-tests.c
++++ b/fs/btrfs/tests/extent-buffer-tests.c
+@@ -61,7 +61,11 @@ static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
+ key.type = BTRFS_EXTENT_CSUM_KEY;
+ key.offset = 0;
+
+- btrfs_setup_item_for_insert(root, path, &key, value_len);
++ /*
++ * Passing a NULL trans handle is fine here: we have a dummy root eb
++ * and the tree is a single node (level 0).
++ */
++ btrfs_setup_item_for_insert(NULL, root, path, &key, value_len);
+ write_extent_buffer(eb, value, btrfs_item_ptr_offset(eb, 0),
+ value_len);
+
+diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
+index 05b03f5eab83b..492d69d2fa737 100644
+--- a/fs/btrfs/tests/inode-tests.c
++++ b/fs/btrfs/tests/inode-tests.c
+@@ -34,7 +34,11 @@ static void insert_extent(struct btrfs_root *root, u64 start, u64 len,
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = start;
+
+- btrfs_setup_item_for_insert(root, &path, &key, value_len);
++ /*
++ * Passing a NULL trans handle is fine here: we have a dummy root eb
++ * and the tree is a single node (level 0).
++ */
++ btrfs_setup_item_for_insert(NULL, root, &path, &key, value_len);
+ fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
+ btrfs_set_file_extent_generation(leaf, fi, 1);
+ btrfs_set_file_extent_type(leaf, fi, type);
+@@ -64,7 +68,11 @@ static void insert_inode_item_key(struct btrfs_root *root)
+ key.type = BTRFS_INODE_ITEM_KEY;
+ key.offset = 0;
+
+- btrfs_setup_item_for_insert(root, &path, &key, value_len);
++ /*
++ * Passing a NULL trans handle is fine here: we have a dummy root eb
++ * and the tree is a single node (level 0).
++ */
++ btrfs_setup_item_for_insert(NULL, root, &path, &key, value_len);
+ }
+
+ /*
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index a00e7a0bc713d..ad0d934991741 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -504,9 +504,9 @@ insert:
+ found_size = btrfs_item_size(path->nodes[0],
+ path->slots[0]);
+ if (found_size > item_size)
+- btrfs_truncate_item(path, item_size, 1);
++ btrfs_truncate_item(trans, path, item_size, 1);
+ else if (found_size < item_size)
+- btrfs_extend_item(path, item_size - found_size);
++ btrfs_extend_item(trans, path, item_size - found_size);
+ } else if (ret) {
+ return ret;
+ }
+@@ -574,7 +574,7 @@ insert:
+ }
+ }
+ no_copy:
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ btrfs_release_path(path);
+ return 0;
+ }
+@@ -3530,7 +3530,7 @@ static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
+ last_offset = max(last_offset, curr_end);
+ }
+ btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
+- btrfs_mark_buffer_dirty(path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
+ btrfs_release_path(path);
+ return 0;
+ }
+@@ -4488,7 +4488,7 @@ copy_item:
+ dst_index++;
+ }
+
+- btrfs_mark_buffer_dirty(dst_path->nodes[0]);
++ btrfs_mark_buffer_dirty(trans, dst_path->nodes[0]);
+ btrfs_release_path(dst_path);
+ out:
+ kfree(ins_data);
+@@ -4693,7 +4693,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
+ write_extent_buffer(leaf, &fi,
+ btrfs_item_ptr_offset(leaf, path->slots[0]),
+ sizeof(fi));
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ btrfs_release_path(path);
+
+diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
+index 7c7001f42b14c..5be74f9e47ebf 100644
+--- a/fs/btrfs/uuid-tree.c
++++ b/fs/btrfs/uuid-tree.c
+@@ -124,7 +124,7 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
+ * An item with that type already exists.
+ * Extend the item and store the new subid at the end.
+ */
+- btrfs_extend_item(path, sizeof(subid_le));
++ btrfs_extend_item(trans, path, sizeof(subid_le));
+ eb = path->nodes[0];
+ slot = path->slots[0];
+ offset = btrfs_item_ptr_offset(eb, slot);
+@@ -139,7 +139,7 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
+ ret = 0;
+ subid_le = cpu_to_le64(subid_cpu);
+ write_extent_buffer(eb, &subid_le, offset, sizeof(subid_le));
+- btrfs_mark_buffer_dirty(eb);
++ btrfs_mark_buffer_dirty(trans, eb);
+
+ out:
+ btrfs_free_path(path);
+@@ -221,7 +221,7 @@ int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
+ move_src = offset + sizeof(subid);
+ move_len = item_size - (move_src - btrfs_item_ptr_offset(eb, slot));
+ memmove_extent_buffer(eb, move_dst, move_src, move_len);
+- btrfs_truncate_item(path, item_size - sizeof(subid), 1);
++ btrfs_truncate_item(trans, path, item_size - sizeof(subid), 1);
+
+ out:
+ btrfs_free_path(path);
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 5019e9244d2d2..1df496c809376 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -1908,7 +1908,7 @@ static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
+ ptr = btrfs_device_fsid(dev_item);
+ write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
+ ptr, BTRFS_FSID_SIZE);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ ret = 0;
+ out:
+@@ -2613,7 +2613,7 @@ next_slot:
+ if (device->fs_devices->seeding) {
+ btrfs_set_device_generation(leaf, dev_item,
+ device->generation);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ }
+
+ path->slots[0]++;
+@@ -2911,7 +2911,7 @@ static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
+ btrfs_device_get_disk_total_bytes(device));
+ btrfs_set_device_bytes_used(leaf, dev_item,
+ btrfs_device_get_bytes_used(device));
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+
+ out:
+ btrfs_free_path(path);
+@@ -3499,7 +3499,7 @@ static int insert_balance_item(struct btrfs_fs_info *fs_info,
+
+ btrfs_set_balance_flags(leaf, item, bctl->flags);
+
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ out:
+ btrfs_free_path(path);
+ err = btrfs_commit_transaction(trans);
+@@ -7513,7 +7513,7 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans,
+ for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
+ btrfs_set_dev_stats_value(eb, ptr, i,
+ btrfs_dev_stat_read(device, i));
+- btrfs_mark_buffer_dirty(eb);
++ btrfs_mark_buffer_dirty(trans, eb);
+
+ out:
+ btrfs_free_path(path);
+diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
+index fc4b20c2688a0..c454b8ce6babe 100644
+--- a/fs/btrfs/xattr.c
++++ b/fs/btrfs/xattr.c
+@@ -188,15 +188,15 @@ int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
+ if (old_data_len + name_len + sizeof(*di) == item_size) {
+ /* No other xattrs packed in the same leaf item. */
+ if (size > old_data_len)
+- btrfs_extend_item(path, size - old_data_len);
++ btrfs_extend_item(trans, path, size - old_data_len);
+ else if (size < old_data_len)
+- btrfs_truncate_item(path, data_size, 1);
++ btrfs_truncate_item(trans, path, data_size, 1);
+ } else {
+ /* There are other xattrs packed in the same item. */
+ ret = btrfs_delete_one_dir_name(trans, root, path, di);
+ if (ret)
+ goto out;
+- btrfs_extend_item(path, data_size);
++ btrfs_extend_item(trans, path, data_size);
+ }
+
+ ptr = btrfs_item_ptr(leaf, slot, char);
+@@ -205,7 +205,7 @@ int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
+ btrfs_set_dir_data_len(leaf, di, size);
+ data_ptr = ((unsigned long)(di + 1)) + name_len;
+ write_extent_buffer(leaf, value, data_ptr, size);
+- btrfs_mark_buffer_dirty(leaf);
++ btrfs_mark_buffer_dirty(trans, leaf);
+ } else {
+ /*
+ * Insert, and we had space for the xattr, so path->slots[0] is
+diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
+index e0ff9d156f6f5..43774693f65f5 100644
+--- a/fs/exfat/namei.c
++++ b/fs/exfat/namei.c
+@@ -351,14 +351,20 @@ static int exfat_find_empty_entry(struct inode *inode,
+ if (exfat_check_max_dentries(inode))
+ return -ENOSPC;
+
+- /* we trust p_dir->size regardless of FAT type */
+- if (exfat_find_last_cluster(sb, p_dir, &last_clu))
+- return -EIO;
+-
+ /*
+ * Allocate new cluster to this directory
+ */
+- exfat_chain_set(&clu, last_clu + 1, 0, p_dir->flags);
++ if (ei->start_clu != EXFAT_EOF_CLUSTER) {
++ /* we trust p_dir->size regardless of FAT type */
++ if (exfat_find_last_cluster(sb, p_dir, &last_clu))
++ return -EIO;
++
++ exfat_chain_set(&clu, last_clu + 1, 0, p_dir->flags);
++ } else {
++ /* This directory is empty */
++ exfat_chain_set(&clu, EXFAT_EOF_CLUSTER, 0,
++ ALLOC_NO_FAT_CHAIN);
++ }
+
+ /* allocate a cluster */
+ ret = exfat_alloc_cluster(inode, 1, &clu, IS_DIRSYNC(inode));
+@@ -368,6 +374,11 @@ static int exfat_find_empty_entry(struct inode *inode,
+ if (exfat_zeroed_cluster(inode, clu.dir))
+ return -EIO;
+
++ if (ei->start_clu == EXFAT_EOF_CLUSTER) {
++ ei->start_clu = clu.dir;
++ p_dir->dir = clu.dir;
++ }
++
+ /* append to the FAT chain */
+ if (clu.flags != p_dir->flags) {
+ /* no-fat-chain bit is disabled,
+@@ -646,7 +657,7 @@ static int exfat_find(struct inode *dir, struct qstr *qname,
+ info->type = exfat_get_entry_type(ep);
+ info->attr = le16_to_cpu(ep->dentry.file.attr);
+ info->size = le64_to_cpu(ep2->dentry.stream.valid_size);
+- if ((info->type == TYPE_FILE) && (info->size == 0)) {
++ if (info->size == 0) {
+ info->flags = ALLOC_NO_FAT_CHAIN;
+ info->start_clu = EXFAT_EOF_CLUSTER;
+ } else {
+@@ -890,6 +901,9 @@ static int exfat_check_dir_empty(struct super_block *sb,
+
+ dentries_per_clu = sbi->dentries_per_clu;
+
++ if (p_dir->dir == EXFAT_EOF_CLUSTER)
++ return 0;
++
+ exfat_chain_dup(&clu, p_dir);
+
+ while (clu.dir != EXFAT_EOF_CLUSTER) {
+@@ -1257,7 +1271,8 @@ static int __exfat_rename(struct inode *old_parent_inode,
+ }
+
+ /* Free the clusters if new_inode is a dir(as if exfat_rmdir) */
+- if (new_entry_type == TYPE_DIR) {
++ if (new_entry_type == TYPE_DIR &&
++ new_ei->start_clu != EXFAT_EOF_CLUSTER) {
+ /* new_ei, new_clu_to_free */
+ struct exfat_chain new_clu_to_free;
+
+diff --git a/fs/ext4/acl.h b/fs/ext4/acl.h
+index 0c5a79c3b5d48..ef4c19e5f5706 100644
+--- a/fs/ext4/acl.h
++++ b/fs/ext4/acl.h
+@@ -68,6 +68,11 @@ extern int ext4_init_acl(handle_t *, struct inode *, struct inode *);
+ static inline int
+ ext4_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
+ {
++ /* usually, the umask is applied by posix_acl_create(), but if
++ ext4 ACL support is disabled at compile time, we need to do
++ it here, because posix_acl_create() will never be called */
++ inode->i_mode &= ~current_umask();
++
+ return 0;
+ }
+ #endif /* CONFIG_EXT4_FS_POSIX_ACL */
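
posix_acl_create() normally applies the umask, but it is never called when ACL support is compiled out, so the stub above must do it. A worked example of the masking:

    umode_t mode = 0666;            /* mode requested by the caller */

    /* With the common umask of 022: 0666 & ~0022 == 0644. */
    mode &= ~current_umask();
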
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 9653aab5e9f4a..733abaf805fa4 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1674,7 +1674,8 @@ struct ext4_sb_info {
+
+ /*
+ * Barrier between writepages ops and changing any inode's JOURNAL_DATA
+- * or EXTENTS flag.
++ * or EXTENTS flag or between writepages ops and changing DELALLOC or
++ * DIOREAD_NOLOCK mount options on remount.
+ */
+ struct percpu_rw_semaphore s_writepages_rwsem;
+ struct dax_device *s_daxdev;
+diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
+index 6f7de14c0fa86..f4b50652f0cce 100644
+--- a/fs/ext4/extents_status.c
++++ b/fs/ext4/extents_status.c
+@@ -152,8 +152,9 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+ static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
+ static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
+ struct ext4_inode_info *locked_ei);
+-static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+- ext4_lblk_t len);
++static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
++ ext4_lblk_t len,
++ struct pending_reservation **prealloc);
+
+ int __init ext4_init_es(void)
+ {
+@@ -448,6 +449,19 @@ static void ext4_es_list_del(struct inode *inode)
+ spin_unlock(&sbi->s_es_lock);
+ }
+
++static inline struct pending_reservation *__alloc_pending(bool nofail)
++{
++ if (!nofail)
++ return kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
++
++ return kmem_cache_zalloc(ext4_pending_cachep, GFP_KERNEL | __GFP_NOFAIL);
++}
++
++static inline void __free_pending(struct pending_reservation *pr)
++{
++ kmem_cache_free(ext4_pending_cachep, pr);
++}
++
+ /*
+ * Returns true if we cannot fail to allocate memory for this extent_status
+ * entry and cannot reclaim it until its status changes.
+@@ -836,11 +850,12 @@ void ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+ {
+ struct extent_status newes;
+ ext4_lblk_t end = lblk + len - 1;
+- int err1 = 0;
+- int err2 = 0;
++ int err1 = 0, err2 = 0, err3 = 0;
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct extent_status *es1 = NULL;
+ struct extent_status *es2 = NULL;
++ struct pending_reservation *pr = NULL;
++ bool revise_pending = false;
+
+ if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+ return;
+@@ -868,11 +883,17 @@ void ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+
+ ext4_es_insert_extent_check(inode, &newes);
+
++ revise_pending = sbi->s_cluster_ratio > 1 &&
++ test_opt(inode->i_sb, DELALLOC) &&
++ (status & (EXTENT_STATUS_WRITTEN |
++ EXTENT_STATUS_UNWRITTEN));
+ retry:
+ if (err1 && !es1)
+ es1 = __es_alloc_extent(true);
+ if ((err1 || err2) && !es2)
+ es2 = __es_alloc_extent(true);
++ if ((err1 || err2 || err3) && revise_pending && !pr)
++ pr = __alloc_pending(true);
+ write_lock(&EXT4_I(inode)->i_es_lock);
+
+ err1 = __es_remove_extent(inode, lblk, end, NULL, es1);
+@@ -897,13 +918,18 @@ retry:
+ es2 = NULL;
+ }
+
+- if (sbi->s_cluster_ratio > 1 && test_opt(inode->i_sb, DELALLOC) &&
+- (status & EXTENT_STATUS_WRITTEN ||
+- status & EXTENT_STATUS_UNWRITTEN))
+- __revise_pending(inode, lblk, len);
++ if (revise_pending) {
++ err3 = __revise_pending(inode, lblk, len, &pr);
++ if (err3 != 0)
++ goto error;
++ if (pr) {
++ __free_pending(pr);
++ pr = NULL;
++ }
++ }
+ error:
+ write_unlock(&EXT4_I(inode)->i_es_lock);
+- if (err1 || err2)
++ if (err1 || err2 || err3)
+ goto retry;
+
+ ext4_es_print_tree(inode);
+@@ -1311,7 +1337,7 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
+ rc->ndelonly--;
+ node = rb_next(&pr->rb_node);
+ rb_erase(&pr->rb_node, &tree->root);
+- kmem_cache_free(ext4_pending_cachep, pr);
++ __free_pending(pr);
+ if (!node)
+ break;
+ pr = rb_entry(node, struct pending_reservation,
+@@ -1405,8 +1431,8 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+ }
+ }
+ if (count_reserved)
+- count_rsvd(inode, lblk, orig_es.es_len - len1 - len2,
+- &orig_es, &rc);
++ count_rsvd(inode, orig_es.es_lblk + len1,
++ orig_es.es_len - len1 - len2, &orig_es, &rc);
+ goto out_get_reserved;
+ }
+
+@@ -1907,11 +1933,13 @@ static struct pending_reservation *__get_pending(struct inode *inode,
+ *
+ * @inode - file containing the cluster
+ * @lblk - logical block in the cluster to be added
++ * @prealloc - preallocated pending entry
+ *
+ * Returns 0 on successful insertion and -ENOMEM on failure. If the
+ * pending reservation is already in the set, returns successfully.
+ */
+-static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
++static int __insert_pending(struct inode *inode, ext4_lblk_t lblk,
++ struct pending_reservation **prealloc)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
+@@ -1937,10 +1965,15 @@ static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
+ }
+ }
+
+- pr = kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
+- if (pr == NULL) {
+- ret = -ENOMEM;
+- goto out;
++ if (likely(*prealloc == NULL)) {
++ pr = __alloc_pending(false);
++ if (!pr) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ } else {
++ pr = *prealloc;
++ *prealloc = NULL;
+ }
+ pr->lclu = lclu;
+
+@@ -1970,7 +2003,7 @@ static void __remove_pending(struct inode *inode, ext4_lblk_t lblk)
+ if (pr != NULL) {
+ tree = &EXT4_I(inode)->i_pending_tree;
+ rb_erase(&pr->rb_node, &tree->root);
+- kmem_cache_free(ext4_pending_cachep, pr);
++ __free_pending(pr);
+ }
+ }
+
+@@ -2029,10 +2062,10 @@ void ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
+ bool allocated)
+ {
+ struct extent_status newes;
+- int err1 = 0;
+- int err2 = 0;
++ int err1 = 0, err2 = 0, err3 = 0;
+ struct extent_status *es1 = NULL;
+ struct extent_status *es2 = NULL;
++ struct pending_reservation *pr = NULL;
+
+ if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+ return;
+@@ -2052,6 +2085,8 @@ retry:
+ es1 = __es_alloc_extent(true);
+ if ((err1 || err2) && !es2)
+ es2 = __es_alloc_extent(true);
++ if ((err1 || err2 || err3) && allocated && !pr)
++ pr = __alloc_pending(true);
+ write_lock(&EXT4_I(inode)->i_es_lock);
+
+ err1 = __es_remove_extent(inode, lblk, lblk, NULL, es1);
+@@ -2074,11 +2109,18 @@ retry:
+ es2 = NULL;
+ }
+
+- if (allocated)
+- __insert_pending(inode, lblk);
++ if (allocated) {
++ err3 = __insert_pending(inode, lblk, &pr);
++ if (err3 != 0)
++ goto error;
++ if (pr) {
++ __free_pending(pr);
++ pr = NULL;
++ }
++ }
+ error:
+ write_unlock(&EXT4_I(inode)->i_es_lock);
+- if (err1 || err2)
++ if (err1 || err2 || err3)
+ goto retry;
+
+ ext4_es_print_tree(inode);
+@@ -2184,21 +2226,24 @@ unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
+ * @inode - file containing the range
+ * @lblk - logical block defining the start of range
+ * @len - length of range in blocks
++ * @prealloc - preallocated pending entry
+ *
+ * Used after a newly allocated extent is added to the extents status tree.
+ * Requires that the extents in the range have either written or unwritten
+ * status. Must be called while holding i_es_lock.
+ */
+-static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+- ext4_lblk_t len)
++static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
++ ext4_lblk_t len,
++ struct pending_reservation **prealloc)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ ext4_lblk_t end = lblk + len - 1;
+ ext4_lblk_t first, last;
+ bool f_del = false, l_del = false;
++ int ret = 0;
+
+ if (len == 0)
+- return;
++ return 0;
+
+ /*
+ * Two cases - block range within single cluster and block range
+@@ -2219,7 +2264,9 @@ static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+ f_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ first, lblk - 1);
+ if (f_del) {
+- __insert_pending(inode, first);
++ ret = __insert_pending(inode, first, prealloc);
++ if (ret < 0)
++ goto out;
+ } else {
+ last = EXT4_LBLK_CMASK(sbi, end) +
+ sbi->s_cluster_ratio - 1;
+@@ -2227,9 +2274,11 @@ static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+ l_del = __es_scan_range(inode,
+ &ext4_es_is_delonly,
+ end + 1, last);
+- if (l_del)
+- __insert_pending(inode, last);
+- else
++ if (l_del) {
++ ret = __insert_pending(inode, last, prealloc);
++ if (ret < 0)
++ goto out;
++ } else
+ __remove_pending(inode, last);
+ }
+ } else {
+@@ -2237,18 +2286,24 @@ static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+ if (first != lblk)
+ f_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ first, lblk - 1);
+- if (f_del)
+- __insert_pending(inode, first);
+- else
++ if (f_del) {
++ ret = __insert_pending(inode, first, prealloc);
++ if (ret < 0)
++ goto out;
++ } else
+ __remove_pending(inode, first);
+
+ last = EXT4_LBLK_CMASK(sbi, end) + sbi->s_cluster_ratio - 1;
+ if (last != end)
+ l_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ end + 1, last);
+- if (l_del)
+- __insert_pending(inode, last);
+- else
++ if (l_del) {
++ ret = __insert_pending(inode, last, prealloc);
++ if (ret < 0)
++ goto out;
++ } else
+ __remove_pending(inode, last);
+ }
++out:
++ return ret;
+ }
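
All of the extents_status hunks serve one constraint: nothing may be allocated under i_es_lock, so pending-reservation entries join the existing preallocate-and-retry scheme (GFP_ATOMIC on the fast path, GFP_KERNEL | __GFP_NOFAIL once a pass has failed) and __insert_pending() consumes the entry handed down. Skeleton of the loop, simplified from the hunks above:

    retry:
            if (err && !pr)
                    pr = __alloc_pending(true);        /* NOFAIL, outside the lock */
            write_lock(&EXT4_I(inode)->i_es_lock);
            err = __insert_pending(inode, lblk, &pr);  /* takes *pr if non-NULL */
            if (!err && pr) {
                    __free_pending(pr);                /* preallocation unused */
                    pr = NULL;
            }
            write_unlock(&EXT4_I(inode)->i_es_lock);
            if (err)
                    goto retry;
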
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index 73a4b711be025..a443580115896 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -306,80 +306,38 @@ out:
+ }
+
+ static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
+- ssize_t written, size_t count)
++ ssize_t count)
+ {
+ handle_t *handle;
+- bool truncate = false;
+- u8 blkbits = inode->i_blkbits;
+- ext4_lblk_t written_blk, end_blk;
+- int ret;
+-
+- /*
+- * Note that EXT4_I(inode)->i_disksize can get extended up to
+- * inode->i_size while the I/O was running due to writeback of delalloc
+- * blocks. But, the code in ext4_iomap_alloc() is careful to use
+- * zeroed/unwritten extents if this is possible; thus we won't leave
+- * uninitialized blocks in a file even if we didn't succeed in writing
+- * as much as we intended.
+- */
+- WARN_ON_ONCE(i_size_read(inode) < EXT4_I(inode)->i_disksize);
+- if (offset + count <= EXT4_I(inode)->i_disksize) {
+- /*
+- * We need to ensure that the inode is removed from the orphan
+- * list if it has been added prematurely, due to writeback of
+- * delalloc blocks.
+- */
+- if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
+- handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+-
+- if (IS_ERR(handle)) {
+- ext4_orphan_del(NULL, inode);
+- return PTR_ERR(handle);
+- }
+-
+- ext4_orphan_del(handle, inode);
+- ext4_journal_stop(handle);
+- }
+-
+- return written;
+- }
+-
+- if (written < 0)
+- goto truncate;
+
++ lockdep_assert_held_write(&inode->i_rwsem);
+ handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+- if (IS_ERR(handle)) {
+- written = PTR_ERR(handle);
+- goto truncate;
+- }
++ if (IS_ERR(handle))
++ return PTR_ERR(handle);
+
+- if (ext4_update_inode_size(inode, offset + written)) {
+- ret = ext4_mark_inode_dirty(handle, inode);
++ if (ext4_update_inode_size(inode, offset + count)) {
++ int ret = ext4_mark_inode_dirty(handle, inode);
+ if (unlikely(ret)) {
+- written = ret;
+ ext4_journal_stop(handle);
+- goto truncate;
++ return ret;
+ }
+ }
+
+- /*
+- * We may need to truncate allocated but not written blocks beyond EOF.
+- */
+- written_blk = ALIGN(offset + written, 1 << blkbits);
+- end_blk = ALIGN(offset + count, 1 << blkbits);
+- if (written_blk < end_blk && ext4_can_truncate(inode))
+- truncate = true;
+-
+- /*
+- * Remove the inode from the orphan list if it has been extended and
+- * everything went OK.
+- */
+- if (!truncate && inode->i_nlink)
++ if (inode->i_nlink)
+ ext4_orphan_del(handle, inode);
+ ext4_journal_stop(handle);
+
+- if (truncate) {
+-truncate:
++ return count;
++}
++
++/*
++ * Clean up the inode after a DIO or DAX extending write has completed and the
++ * inode size has been updated using ext4_handle_inode_extension().
++ */
++static void ext4_inode_extension_cleanup(struct inode *inode, ssize_t count)
++{
++ lockdep_assert_held_write(&inode->i_rwsem);
++ if (count < 0) {
+ ext4_truncate_failed_write(inode);
+ /*
+ * If the truncate operation failed early, then the inode may
+@@ -388,9 +346,28 @@ truncate:
+ */
+ if (inode->i_nlink)
+ ext4_orphan_del(NULL, inode);
++ return;
+ }
++ /*
++ * If i_disksize got extended due to writeback of delalloc blocks while
++ * the DIO was running we could fail to cleanup the orphan list in
++ * ext4_handle_inode_extension(). Do it now.
++ */
++ if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
++ handle_t *handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+
+- return written;
++ if (IS_ERR(handle)) {
++ /*
++ * The write has successfully completed. Not much to
++ * do with the error here, so just clean up the orphan
++ * list and hope for the best.
++ */
++ ext4_orphan_del(NULL, inode);
++ return;
++ }
++ ext4_orphan_del(handle, inode);
++ ext4_journal_stop(handle);
++ }
+ }
+
+ static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
+@@ -399,31 +376,22 @@ static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
+ loff_t pos = iocb->ki_pos;
+ struct inode *inode = file_inode(iocb->ki_filp);
+
++ if (!error && size && flags & IOMAP_DIO_UNWRITTEN)
++ error = ext4_convert_unwritten_extents(NULL, inode, pos, size);
+ if (error)
+ return error;
+-
+- if (size && flags & IOMAP_DIO_UNWRITTEN) {
+- error = ext4_convert_unwritten_extents(NULL, inode, pos, size);
+- if (error < 0)
+- return error;
+- }
+ /*
+- * If we are extending the file, we have to update i_size here before
+- * page cache gets invalidated in iomap_dio_rw(). Otherwise racing
+- * buffered reads could zero out too much from page cache pages. Update
+- * of on-disk size will happen later in ext4_dio_write_iter() where
+- * we have enough information to also perform orphan list handling etc.
+- * Note that we perform all extending writes synchronously under
+- * i_rwsem held exclusively so i_size update is safe here in that case.
+- * If the write was not extending, we cannot see pos > i_size here
+- * because operations reducing i_size like truncate wait for all
+- * outstanding DIO before updating i_size.
++ * Note that EXT4_I(inode)->i_disksize can get extended up to
++ * inode->i_size while the I/O was running due to writeback of delalloc
++ * blocks. But the code in ext4_iomap_alloc() is careful to use
++ * zeroed/unwritten extents if this is possible; thus we won't leave
++ * uninitialized blocks in a file even if we didn't succeed in writing
++ * as much as we intended.
+ */
+- pos += size;
+- if (pos > i_size_read(inode))
+- i_size_write(inode, pos);
+-
+- return 0;
++ WARN_ON_ONCE(i_size_read(inode) < READ_ONCE(EXT4_I(inode)->i_disksize));
++ if (pos + size <= READ_ONCE(EXT4_I(inode)->i_disksize))
++ return size;
++ return ext4_handle_inode_extension(inode, pos, size);
+ }
+
+ static const struct iomap_dio_ops ext4_dio_write_ops = {
+@@ -569,18 +537,20 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ return ext4_buffered_write_iter(iocb, from);
+ }
+
++ /*
++ * Prevent inline data from being created since we are going to allocate
++ * blocks for DIO. We know the inode does not currently have inline data
++ * because ext4_should_use_dio() checked for it, but we have to clear
++ * the state flag before the write checks because a lock cycle could
++ * introduce races with other writers.
++ */
++ ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
++
+ ret = ext4_dio_write_checks(iocb, from, &ilock_shared, &extend,
+ &unwritten, &dio_flags);
+ if (ret <= 0)
+ return ret;
+
+- /*
+- * Make sure inline data cannot be created anymore since we are going
+- * to allocate blocks for DIO. We know the inode does not have any
+- * inline data now because ext4_dio_supported() checked for that.
+- */
+- ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+-
+ offset = iocb->ki_pos;
+ count = ret;
+
+@@ -606,9 +576,16 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ dio_flags, NULL, 0);
+ if (ret == -ENOTBLK)
+ ret = 0;
+-
+- if (extend)
+- ret = ext4_handle_inode_extension(inode, offset, ret, count);
++ if (extend) {
++ /*
++ * We always perform extending DIO write synchronously so by
++ * now the IO is completed and ext4_handle_inode_extension()
++ * was called. Cleanup the inode in case of error or race with
++ * writeback of delalloc blocks.
++ */
++ WARN_ON_ONCE(ret == -EIOCBQUEUED);
++ ext4_inode_extension_cleanup(inode, ret);
++ }
+
+ out:
+ if (ilock_shared)
+@@ -689,8 +666,10 @@ ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
+
+ ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
+
+- if (extend)
+- ret = ext4_handle_inode_extension(inode, offset, ret, count);
++ if (extend) {
++ ret = ext4_handle_inode_extension(inode, offset, ret);
++ ext4_inode_extension_cleanup(inode, ret);
++ }
+ out:
+ inode_unlock(inode);
+ if (ret > 0)
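
After this refactor the extending-write bookkeeping splits in two: ext4_handle_inode_extension() only updates i_size/i_disksize and drops the orphan entry, and now runs from the DIO completion handler; truncation of over-allocated blocks and leftover-orphan handling move to ext4_inode_extension_cleanup(), called once iomap_dio_rw() has returned. The resulting flow for a synchronous extending DIO write, as a sketch:

    /*
     * ext4_dio_write_iter()
     *   iomap_dio_rw(...)
     *     -> ext4_dio_write_end_io()               [completion]
     *          -> ext4_handle_inode_extension()    [i_size/i_disksize, orphan del]
     *   ext4_inode_extension_cleanup(inode, ret)   [truncate on error, orphan fixup]
     */
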
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 43775a6ca5054..28a92de978f5d 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -789,10 +789,22 @@ int ext4_get_block(struct inode *inode, sector_t iblock,
+ int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create)
+ {
++ int ret = 0;
++
+ ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
+ inode->i_ino, create);
+- return _ext4_get_block(inode, iblock, bh_result,
++ ret = _ext4_get_block(inode, iblock, bh_result,
+ EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT);
++
++ /*
++ * If the buffer is marked unwritten, mark it as new to make sure it is
++ * zeroed out correctly in case of partial writes. Otherwise, there is
++ * a chance of stale data getting exposed.
++ */
++ if (ret == 0 && buffer_unwritten(bh_result))
++ set_buffer_new(bh_result);
++
++ return ret;
+ }
+
+ /* Maximum number of blocks we map for direct IO at once. */
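
The buffer_new flag matters here because generic write-begin helpers zero only the untouched parts of a block when the buffer is flagged new. A rough sketch of that consumer-side behaviour, with illustrative variable names (not the exact fs/buffer.c code):

/* Inside a write_begin-style helper, for a write covering [from, to): */
if (buffer_new(bh) && !folio_test_uptodate(folio)) {
        /* Zero the parts of the block that this write skips. */
        folio_zero_segments(folio, block_start, from, to, block_end);
}

Without set_buffer_new() on an unwritten mapping, those head and tail ranges could be left holding stale data, which is the exposure the comment warns about.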
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index 0361c20910def..667381180b261 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -560,13 +560,8 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
+ if (meta_bg == 0 && !ext4_bg_has_super(sb, group))
+ goto handle_itb;
+
+- if (meta_bg == 1) {
+- ext4_group_t first_group;
+- first_group = ext4_meta_bg_first_group(sb, group);
+- if (first_group != group + 1 &&
+- first_group != group + EXT4_DESC_PER_BLOCK(sb) - 1)
+- goto handle_itb;
+- }
++ if (meta_bg == 1)
++ goto handle_itb;
+
+ block = start + ext4_bg_has_super(sb, group);
+ /* Copy all of the GDT blocks into the backup in this group */
+@@ -1191,8 +1186,10 @@ static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
+ ext4_group_first_block_no(sb, group));
+ BUFFER_TRACE(bh, "get_write_access");
+ if ((err = ext4_journal_get_write_access(handle, sb, bh,
+- EXT4_JTR_NONE)))
++ EXT4_JTR_NONE))) {
++ brelse(bh);
+ break;
++ }
+ lock_buffer(bh);
+ memcpy(bh->b_data, data, size);
+ if (rest)
+@@ -1601,6 +1598,8 @@ exit_journal:
+ int gdb_num_end = ((group + flex_gd->count - 1) /
+ EXT4_DESC_PER_BLOCK(sb));
+ int meta_bg = ext4_has_feature_meta_bg(sb);
++ sector_t padding_blocks = meta_bg ? 0 : sbi->s_sbh->b_blocknr -
++ ext4_group_first_block_no(sb, 0);
+ sector_t old_gdb = 0;
+
+ update_backups(sb, ext4_group_first_block_no(sb, 0),
+@@ -1612,8 +1611,8 @@ exit_journal:
+ gdb_num);
+ if (old_gdb == gdb_bh->b_blocknr)
+ continue;
+- update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
+- gdb_bh->b_size, meta_bg);
++ update_backups(sb, gdb_bh->b_blocknr - padding_blocks,
++ gdb_bh->b_data, gdb_bh->b_size, meta_bg);
+ old_gdb = gdb_bh->b_blocknr;
+ }
+ }
+@@ -1980,9 +1979,7 @@ static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
+
+ errout:
+ ret = ext4_journal_stop(handle);
+- if (!err)
+- err = ret;
+- return ret;
++ return err ? err : ret;
+
+ invalid_resize_inode:
+ ext4_error(sb, "corrupted/inconsistent resize inode");
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index c94ebf704616e..e08fa12c0cd6a 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -6425,6 +6425,7 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
+ struct ext4_mount_options old_opts;
+ ext4_group_t g;
+ int err = 0;
++ int alloc_ctx;
+ #ifdef CONFIG_QUOTA
+ int enable_quota = 0;
+ int i, j;
+@@ -6465,7 +6466,16 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
+
+ }
+
++ /*
++ * Changing the DIOREAD_NOLOCK or DELALLOC mount options may cause
++ * two calls to ext4_should_dioread_nolock() to return inconsistent
++ * values, triggering a WARN_ON in ext4_add_complete_io(). Grab
++ * s_writepages_rwsem here to avoid a race between writepages
++ * operations and remount.
++ */
++ alloc_ctx = ext4_writepages_down_write(sb);
+ ext4_apply_options(fc, sb);
++ ext4_writepages_up_write(sb, alloc_ctx);
+
+ if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
+ test_opt(sb, JOURNAL_CHECKSUM)) {
+@@ -6683,6 +6693,8 @@ restore_opts:
+ if ((sb->s_flags & SB_RDONLY) && !(old_sb_flags & SB_RDONLY) &&
+ sb_any_quota_suspended(sb))
+ dquot_resume(sb, -1);
++
++ alloc_ctx = ext4_writepages_down_write(sb);
+ sb->s_flags = old_sb_flags;
+ sbi->s_mount_opt = old_opts.s_mount_opt;
+ sbi->s_mount_opt2 = old_opts.s_mount_opt2;
+@@ -6691,6 +6703,8 @@ restore_opts:
+ sbi->s_commit_interval = old_opts.s_commit_interval;
+ sbi->s_min_batch_time = old_opts.s_min_batch_time;
+ sbi->s_max_batch_time = old_opts.s_max_batch_time;
++ ext4_writepages_up_write(sb, alloc_ctx);
++
+ if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
+ ext4_release_system_zone(sb);
+ #ifdef CONFIG_QUOTA
+diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
+index 236d890f560b0..4d1d41143d5f8 100644
+--- a/fs/f2fs/compress.c
++++ b/fs/f2fs/compress.c
+@@ -1988,7 +1988,7 @@ void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
+ int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
+ {
+ dev_t dev = sbi->sb->s_bdev->bd_dev;
+- char slab_name[32];
++ char slab_name[35];
+
+ if (!f2fs_sb_has_compression(sbi))
+ return 0;
+diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
+index 0e2d49140c07f..ad8dfac73bd44 100644
+--- a/fs/f2fs/extent_cache.c
++++ b/fs/f2fs/extent_cache.c
+@@ -74,40 +74,14 @@ static void __set_extent_info(struct extent_info *ei,
+ }
+ }
+
+-static bool __may_read_extent_tree(struct inode *inode)
+-{
+- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+-
+- if (!test_opt(sbi, READ_EXTENT_CACHE))
+- return false;
+- if (is_inode_flag_set(inode, FI_NO_EXTENT))
+- return false;
+- if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
+- !f2fs_sb_has_readonly(sbi))
+- return false;
+- return S_ISREG(inode->i_mode);
+-}
+-
+-static bool __may_age_extent_tree(struct inode *inode)
+-{
+- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+-
+- if (!test_opt(sbi, AGE_EXTENT_CACHE))
+- return false;
+- if (is_inode_flag_set(inode, FI_COMPRESSED_FILE))
+- return false;
+- if (file_is_cold(inode))
+- return false;
+-
+- return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
+-}
+-
+ static bool __init_may_extent_tree(struct inode *inode, enum extent_type type)
+ {
+ if (type == EX_READ)
+- return __may_read_extent_tree(inode);
+- else if (type == EX_BLOCK_AGE)
+- return __may_age_extent_tree(inode);
++ return test_opt(F2FS_I_SB(inode), READ_EXTENT_CACHE) &&
++ S_ISREG(inode->i_mode);
++ if (type == EX_BLOCK_AGE)
++ return test_opt(F2FS_I_SB(inode), AGE_EXTENT_CACHE) &&
++ (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode));
+ return false;
+ }
+
+@@ -120,7 +94,22 @@ static bool __may_extent_tree(struct inode *inode, enum extent_type type)
+ if (list_empty(&F2FS_I_SB(inode)->s_list))
+ return false;
+
+- return __init_may_extent_tree(inode, type);
++ if (!__init_may_extent_tree(inode, type))
++ return false;
++
++ if (type == EX_READ) {
++ if (is_inode_flag_set(inode, FI_NO_EXTENT))
++ return false;
++ if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
++ !f2fs_sb_has_readonly(F2FS_I_SB(inode)))
++ return false;
++ } else if (type == EX_BLOCK_AGE) {
++ if (is_inode_flag_set(inode, FI_COMPRESSED_FILE))
++ return false;
++ if (file_is_cold(inode))
++ return false;
++ }
++ return true;
+ }
+
+ static void __try_update_largest_extent(struct extent_tree *et,
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index e53a429bd4c4c..6f08aaf0ea340 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -4006,6 +4006,15 @@ static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
+ F2FS_I(inode)->i_compress_algorithm = option.algorithm;
+ F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
+ F2FS_I(inode)->i_cluster_size = BIT(option.log_cluster_size);
++ /* Set default level */
++ if (F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD)
++ F2FS_I(inode)->i_compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
++ else
++ F2FS_I(inode)->i_compress_level = 0;
++ /* Adjust mount option level */
++ if (option.algorithm == F2FS_OPTION(sbi).compress_algorithm &&
++ F2FS_OPTION(sbi).compress_level)
++ F2FS_I(inode)->i_compress_level = F2FS_OPTION(sbi).compress_level;
+ f2fs_mark_inode_dirty_sync(inode, true);
+
+ if (!f2fs_is_compress_backend_ready(inode))
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index ee2e1dd64f256..8b30f11f37b46 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -1467,7 +1467,8 @@ page_hit:
+ ofs_of_node(page), cpver_of_node(page),
+ next_blkaddr_of_node(page));
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+- err = -EINVAL;
++ f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
++ err = -EFSCORRUPTED;
+ out_err:
+ ClearPageUptodate(page);
+ out_put_err:
+@@ -2389,7 +2390,7 @@ static int scan_nat_page(struct f2fs_sb_info *sbi,
+ blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
+
+ if (blk_addr == NEW_ADDR)
+- return -EINVAL;
++ return -EFSCORRUPTED;
+
+ if (blk_addr == NULL_ADDR) {
+ add_free_nid(sbi, start_nid, true, true);
+@@ -2504,7 +2505,14 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
+
+ if (ret) {
+ f2fs_up_read(&nm_i->nat_tree_lock);
+- f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
++
++ if (ret == -EFSCORRUPTED) {
++ f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
++ set_sbi_flag(sbi, SBI_NEED_FSCK);
++ f2fs_handle_error(sbi,
++ ERROR_INCONSISTENT_NAT);
++ }
++
+ return ret;
+ }
+ }
+@@ -2743,7 +2751,9 @@ recover_xnid:
+ f2fs_update_inode_page(inode);
+
+ /* 3: update and set xattr node page dirty */
+- memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);
++ if (page)
++ memcpy(F2FS_NODE(xpage), F2FS_NODE(page),
++ VALID_XATTR_BLOCK_SIZE);
+
+ set_page_dirty(xpage);
+ f2fs_put_page(xpage, 1);
+diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
+index 476b186b90a6c..26217fa578727 100644
+--- a/fs/f2fs/xattr.c
++++ b/fs/f2fs/xattr.c
+@@ -364,10 +364,10 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
+
+ *xe = __find_xattr(cur_addr, last_txattr_addr, NULL, index, len, name);
+ if (!*xe) {
+- f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
++ f2fs_err(F2FS_I_SB(inode), "lookup inode (%lu) has corrupted xattr",
+ inode->i_ino);
+ set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
+- err = -EFSCORRUPTED;
++ err = -ENODATA;
+ f2fs_handle_error(F2FS_I_SB(inode),
+ ERROR_CORRUPTED_XATTR);
+ goto out;
+@@ -584,13 +584,12 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
+
+ if ((void *)(entry) + sizeof(__u32) > last_base_addr ||
+ (void *)XATTR_NEXT_ENTRY(entry) > last_base_addr) {
+- f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
++ f2fs_err(F2FS_I_SB(inode), "list inode (%lu) has corrupted xattr",
+ inode->i_ino);
+ set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
+- error = -EFSCORRUPTED;
+ f2fs_handle_error(F2FS_I_SB(inode),
+ ERROR_CORRUPTED_XATTR);
+- goto cleanup;
++ break;
+ }
+
+ if (!prefix)
+@@ -650,7 +649,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
+
+ if (size > MAX_VALUE_LEN(inode))
+ return -E2BIG;
+-
++retry:
+ error = read_all_xattrs(inode, ipage, &base_addr);
+ if (error)
+ return error;
+@@ -660,7 +659,14 @@ static int __f2fs_setxattr(struct inode *inode, int index,
+ /* find entry with wanted name. */
+ here = __find_xattr(base_addr, last_base_addr, NULL, index, len, name);
+ if (!here) {
+- f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
++ if (!F2FS_I(inode)->i_xattr_nid) {
++ f2fs_notice(F2FS_I_SB(inode),
++ "recover xattr in inode (%lu)", inode->i_ino);
++ f2fs_recover_xattr_data(inode, NULL);
++ kfree(base_addr);
++ goto retry;
++ }
++ f2fs_err(F2FS_I_SB(inode), "set inode (%lu) has corrupted xattr",
+ inode->i_ino);
+ set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
+ error = -EFSCORRUPTED;
+diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
+index 17c994a0c0d09..28c3711628805 100644
+--- a/fs/gfs2/inode.c
++++ b/fs/gfs2/inode.c
+@@ -1860,16 +1860,24 @@ out:
+ int gfs2_permission(struct mnt_idmap *idmap, struct inode *inode,
+ int mask)
+ {
++ int may_not_block = mask & MAY_NOT_BLOCK;
+ struct gfs2_inode *ip;
+ struct gfs2_holder i_gh;
++ struct gfs2_glock *gl;
+ int error;
+
+ gfs2_holder_mark_uninitialized(&i_gh);
+ ip = GFS2_I(inode);
+- if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
+- if (mask & MAY_NOT_BLOCK)
++ gl = rcu_dereference_check(ip->i_gl, !may_not_block);
++ if (unlikely(!gl)) {
++ /* inode is getting torn down, must be RCU mode */
++ WARN_ON_ONCE(!may_not_block);
++ return -ECHILD;
++ }
++ if (gfs2_glock_is_locked_by_me(gl) == NULL) {
++ if (may_not_block)
+ return -ECHILD;
+- error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
++ error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
+ if (error)
+ return error;
+ }
+diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
+index 8a27957dbfeed..825d7c8afa3a7 100644
+--- a/fs/gfs2/ops_fstype.c
++++ b/fs/gfs2/ops_fstype.c
+@@ -1261,10 +1261,8 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
+
+ if (!sb_rdonly(sb)) {
+ error = init_threads(sdp);
+- if (error) {
+- gfs2_withdraw_delayed(sdp);
++ if (error)
+ goto fail_per_node;
+- }
+ }
+
+ error = gfs2_freeze_lock_shared(sdp);
+diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
+index 704192b736050..ccecb79eeaf8e 100644
+--- a/fs/gfs2/quota.c
++++ b/fs/gfs2/quota.c
+@@ -441,6 +441,17 @@ static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
+ (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
+ return 0;
+
++ /*
++ * If qd_change is 0 it means a pending quota change was negated.
++ * We should not sync it, but we still have a qd reference and slot
++ * reference taken by gfs2_quota_change -> do_qc that need to be put.
++ */
++ if (!qd->qd_change && test_and_clear_bit(QDF_CHANGE, &qd->qd_flags)) {
++ slot_put(qd);
++ qd_put(qd);
++ return 0;
++ }
++
+ if (!lockref_get_not_dead(&qd->qd_lockref))
+ return 0;
+
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index 9f4d5d6549ee6..f98ddb9d19a21 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -1558,7 +1558,7 @@ out:
+ wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
+ gfs2_glock_add_to_lru(ip->i_gl);
+ gfs2_glock_put_eventually(ip->i_gl);
+- ip->i_gl = NULL;
++ rcu_assign_pointer(ip->i_gl, NULL);
+ }
+ }
+
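
These two gfs2 hunks form a publish/observe pair: eviction publishes NULL with rcu_assign_pointer() so the lockless RCU-walk path in gfs2_permission() can detect teardown instead of dereferencing a stale glock. A minimal sketch of the pattern with illustrative names:

struct res;

struct obj {
        struct res __rcu *res;
};

static void obj_teardown(struct obj *o)
{
        /* Writer side: publish NULL before dropping the resource. */
        rcu_assign_pointer(o->res, NULL);
}

static int obj_access(struct obj *o, bool may_not_block)
{
        /* Reader side: lockless access is legal only in RCU mode. */
        struct res *r = rcu_dereference_check(o->res, !may_not_block);

        if (unlikely(!r))
                return -ECHILD; /* object is being torn down */
        /* ... use r ... */
        return 0;
}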
+diff --git a/fs/inode.c b/fs/inode.c
+index 67611a360031b..f11b4173d3f41 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -2498,6 +2498,22 @@ struct timespec64 current_time(struct inode *inode)
+ }
+ EXPORT_SYMBOL(current_time);
+
++/**
++ * inode_set_ctime_current - set the ctime to current_time
++ * @inode: inode
++ *
++ * Set the inode->i_ctime to the current value for the inode. Returns
++ * the current value that was assigned to i_ctime.
++ */
++struct timespec64 inode_set_ctime_current(struct inode *inode)
++{
++ struct timespec64 now = current_time(inode);
++
++ inode_set_ctime(inode, now.tv_sec, now.tv_nsec);
++ return now;
++}
++EXPORT_SYMBOL(inode_set_ctime_current);
++
+ /**
+ * in_group_or_capable - check whether caller is CAP_FSETID privileged
+ * @idmap: idmap of the mount @inode was found from
+diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
+index c269a7d29a465..5b771a3d8d9ae 100644
+--- a/fs/jbd2/recovery.c
++++ b/fs/jbd2/recovery.c
+@@ -289,6 +289,8 @@ int jbd2_journal_recover(journal_t *journal)
+ journal_superblock_t * sb;
+
+ struct recovery_info info;
++ errseq_t wb_err;
++ struct address_space *mapping;
+
+ memset(&info, 0, sizeof(info));
+ sb = journal->j_superblock;
+@@ -306,6 +308,9 @@ int jbd2_journal_recover(journal_t *journal)
+ return 0;
+ }
+
++ wb_err = 0;
++ mapping = journal->j_fs_dev->bd_inode->i_mapping;
++ errseq_check_and_advance(&mapping->wb_err, &wb_err);
+ err = do_one_pass(journal, &info, PASS_SCAN);
+ if (!err)
+ err = do_one_pass(journal, &info, PASS_REVOKE);
+@@ -327,6 +332,9 @@ int jbd2_journal_recover(journal_t *journal)
+
+ jbd2_journal_clear_revoke(journal);
+ err2 = sync_blockdev(journal->j_fs_dev);
++ if (!err)
++ err = err2;
++ err2 = errseq_check_and_advance(&mapping->wb_err, &wb_err);
+ if (!err)
+ err = err2;
+ /* Make sure all replayed data is on permanent storage */
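
The recovery change relies on the errseq_t cursor idiom: latch the current writeback error sequence before replay, then check whether a new error was recorded afterwards. A self-contained sketch of the idiom (the function name is hypothetical):

static int sketch_detect_new_wb_errors(struct address_space *mapping)
{
        errseq_t cursor = 0;

        /* Advance the cursor past any error already recorded. */
        errseq_check_and_advance(&mapping->wb_err, &cursor);

        /* ... submit and wait for writeback here ... */

        /* Non-zero iff a new error appeared since we sampled. */
        return errseq_check_and_advance(&mapping->wb_err, &cursor);
}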
+diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
+index 88afd108c2dd2..11c77757ead9e 100644
+--- a/fs/jfs/jfs_dmap.c
++++ b/fs/jfs/jfs_dmap.c
+@@ -87,7 +87,7 @@ static int dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno,
+ static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks);
+ static int dbFindBits(u32 word, int l2nb);
+ static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno);
+-static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx);
++static int dbFindLeaf(dmtree_t *tp, int l2nb, int *leafidx, bool is_ctl);
+ static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
+ int nblocks);
+ static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
+@@ -180,7 +180,8 @@ int dbMount(struct inode *ipbmap)
+ bmp->db_nfree = le64_to_cpu(dbmp_le->dn_nfree);
+
+ bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage);
+- if (bmp->db_l2nbperpage > L2PSIZE - L2MINBLOCKSIZE) {
++ if (bmp->db_l2nbperpage > L2PSIZE - L2MINBLOCKSIZE ||
++ bmp->db_l2nbperpage < 0) {
+ err = -EINVAL;
+ goto err_release_metapage;
+ }
+@@ -194,6 +195,12 @@ int dbMount(struct inode *ipbmap)
+ bmp->db_maxlevel = le32_to_cpu(dbmp_le->dn_maxlevel);
+ bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag);
+ bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref);
++ if (bmp->db_maxag >= MAXAG || bmp->db_maxag < 0 ||
++ bmp->db_agpref >= MAXAG || bmp->db_agpref < 0) {
++ err = -EINVAL;
++ goto err_release_metapage;
++ }
++
+ bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel);
+ bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight);
+ bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
+@@ -1710,7 +1717,7 @@ static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno)
+ * dbFindLeaf() returns the index of the leaf at which
+ * free space was found.
+ */
+- rc = dbFindLeaf((dmtree_t *) dcp, l2nb, &leafidx);
++ rc = dbFindLeaf((dmtree_t *) dcp, l2nb, &leafidx, true);
+
+ /* release the buffer.
+ */
+@@ -1957,7 +1964,7 @@ dbAllocDmapLev(struct bmap * bmp,
+ * free space. if sufficient free space is found, dbFindLeaf()
+ * returns the index of the leaf at which free space was found.
+ */
+- if (dbFindLeaf((dmtree_t *) & dp->tree, l2nb, &leafidx))
++ if (dbFindLeaf((dmtree_t *) &dp->tree, l2nb, &leafidx, false))
+ return -ENOSPC;
+
+ if (leafidx < 0)
+@@ -2921,14 +2928,18 @@ static void dbAdjTree(dmtree_t * tp, int leafno, int newval)
+ * leafidx - return pointer to be set to the index of the leaf
+ * describing at least l2nb free blocks if sufficient
+ * free blocks are found.
++ * is_ctl - determines if the tree is of type ctl
+ *
+ * RETURN VALUES:
+ * 0 - success
+ * -ENOSPC - insufficient free blocks.
+ */
+-static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx)
++static int dbFindLeaf(dmtree_t *tp, int l2nb, int *leafidx, bool is_ctl)
+ {
+ int ti, n = 0, k, x = 0;
++ int max_size;
++
++ max_size = is_ctl ? CTLTREESIZE : TREESIZE;
+
+ /* first check the root of the tree to see if there is
+ * sufficient free space.
+@@ -2949,6 +2960,8 @@ static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx)
+ /* sufficient free space found. move to the next
+ * level (or quit if this is the last level).
+ */
++ if (x + n > max_size)
++ return -ENOSPC;
+ if (l2nb <= tp->dmt_stree[x + n])
+ break;
+ }
+diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
+index 6fb28572cb2c6..34f1358264e23 100644
+--- a/fs/jfs/jfs_imap.c
++++ b/fs/jfs/jfs_imap.c
+@@ -1320,7 +1320,7 @@ diInitInode(struct inode *ip, int iagno, int ino, int extno, struct iag * iagp)
+ int diAlloc(struct inode *pip, bool dir, struct inode *ip)
+ {
+ int rc, ino, iagno, addext, extno, bitno, sword;
+- int nwords, rem, i, agno;
++ int nwords, rem, i, agno, dn_numag;
+ u32 mask, inosmap, extsmap;
+ struct inode *ipimap;
+ struct metapage *mp;
+@@ -1356,6 +1356,9 @@ int diAlloc(struct inode *pip, bool dir, struct inode *ip)
+
+ /* get the ag number of this iag */
+ agno = BLKTOAG(JFS_IP(pip)->agstart, JFS_SBI(pip->i_sb));
++ dn_numag = JFS_SBI(pip->i_sb)->bmap->db_numag;
++ if (agno < 0 || agno > dn_numag)
++ return -EIO;
+
+ if (atomic_read(&JFS_SBI(pip->i_sb)->bmap->db_active[agno])) {
+ /*
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 5f088e3eeca1d..8374fa230ba5a 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5622,7 +5622,7 @@ static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
+
+ msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
+ nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
+- nfs4_state_protect_write(server->nfs_client, clnt, msg, hdr);
++ nfs4_state_protect_write(hdr->ds_clp ? hdr->ds_clp : server->nfs_client, clnt, msg, hdr);
+ }
+
+ static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
+@@ -5663,7 +5663,8 @@ static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_mess
+ data->res.server = server;
+ msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
+ nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
+- nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_COMMIT, clnt, msg);
++ nfs4_state_protect(data->ds_clp ? data->ds_clp : server->nfs_client,
++ NFS_SP4_MACH_CRED_COMMIT, clnt, msg);
+ }
+
+ static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args,
+@@ -8934,6 +8935,7 @@ void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
+
+ sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED);
+
++try_again:
+ /* Test connection for session trunking. Async exchange_id call */
+ task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt);
+ if (IS_ERR(task))
+@@ -8946,11 +8948,15 @@ void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
+
+ if (status == 0)
+ rpc_clnt_xprt_switch_add_xprt(clnt, xprt);
+- else if (rpc_clnt_xprt_switch_has_addr(clnt,
++ else if (status != -NFS4ERR_DELAY && rpc_clnt_xprt_switch_has_addr(clnt,
+ (struct sockaddr *)&xprt->addr))
+ rpc_clnt_xprt_switch_remove_xprt(clnt, xprt);
+
+ rpc_put_task(task);
++ if (status == -NFS4ERR_DELAY) {
++ ssleep(1);
++ goto try_again;
++ }
+ }
+ EXPORT_SYMBOL_GPL(nfs4_test_session_trunk);
+
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index daf305daa7516..2a493cbaf453d 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -2785,7 +2785,7 @@ static int client_opens_release(struct inode *inode, struct file *file)
+
+ /* XXX: alternatively, we could get/drop in seq start/stop */
+ drop_client(clp);
+- return 0;
++ return seq_release(inode, file);
+ }
+
+ static const struct file_operations client_states_fops = {
+diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
+index a8eda1c85829e..1ad4f30d5f855 100644
+--- a/fs/nfsd/nfscache.c
++++ b/fs/nfsd/nfscache.c
+@@ -582,24 +582,17 @@ void nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
+ return;
+ }
+
+-/*
+- * Copy cached reply to current reply buffer. Should always fit.
+- * FIXME as reply is in a page, we should just attach the page, and
+- * keep a refcount....
+- */
+ static int
+ nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
+ {
+- struct kvec *vec = &rqstp->rq_res.head[0];
+-
+- if (vec->iov_len + data->iov_len > PAGE_SIZE) {
+- printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
+- data->iov_len);
+- return 0;
+- }
+- memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
+- vec->iov_len += data->iov_len;
+- return 1;
++ __be32 *p;
++
++ p = xdr_reserve_space(&rqstp->rq_res_stream, data->iov_len);
++ if (unlikely(!p))
++ return false;
++ memcpy(p, data->iov_base, data->iov_len);
++ xdr_commit_encode(&rqstp->rq_res_stream);
++ return true;
+ }
+
+ /*
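
The rewrite drops the PAGE_SIZE bound and the open-coded kvec append in favour of the generic xdr stream reserve/commit pattern. That pattern in isolation (a sketch; assumes an already-initialized struct xdr_stream):

static bool sketch_xdr_append(struct xdr_stream *xdr,
                              const void *buf, size_t len)
{
        /* Reserve len bytes in the reply stream; NULL means no room. */
        __be32 *p = xdr_reserve_space(xdr, len);

        if (unlikely(!p))
                return false;
        memcpy(p, buf, len);
        xdr_commit_encode(xdr);
        return true;
}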
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index 80a70eaa30d90..1ef8c0d8871ed 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -1467,7 +1467,7 @@ int ovl_fill_super(struct super_block *sb, struct fs_context *fc)
+ ovl_trusted_xattr_handlers;
+ sb->s_fs_info = ofs;
+ sb->s_flags |= SB_POSIXACL;
+- sb->s_iflags |= SB_I_SKIP_SYNC | SB_I_IMA_UNVERIFIABLE_SIGNATURE;
++ sb->s_iflags |= SB_I_SKIP_SYNC;
+
+ err = -ENOMEM;
+ root_dentry = ovl_get_root(sb, ctx->upper.dentry, oe);
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index 5ea42653126eb..800d34c3a3cca 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -1574,7 +1574,6 @@ static const struct sysctl_alias sysctl_aliases[] = {
+ {"hung_task_panic", "kernel.hung_task_panic" },
+ {"numa_zonelist_order", "vm.numa_zonelist_order" },
+ {"softlockup_all_cpu_backtrace", "kernel.softlockup_all_cpu_backtrace" },
+- {"softlockup_panic", "kernel.softlockup_panic" },
+ { }
+ };
+
+@@ -1590,6 +1589,13 @@ static const char *sysctl_find_alias(char *param)
+ return NULL;
+ }
+
++bool sysctl_is_alias(char *param)
++{
++ const char *alias = sysctl_find_alias(param);
++
++ return alias != NULL;
++}
++
+ /* Set sysctl value passed on kernel command line. */
+ static int process_sysctl_arg(char *param, char *val,
+ const char *unused, void *arg)
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index bfa423ae16e3d..188984b0af66f 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -2403,6 +2403,20 @@ static int vfs_setup_quota_inode(struct inode *inode, int type)
+ if (sb_has_quota_loaded(sb, type))
+ return -EBUSY;
+
++ /*
++ * Quota files should never be encrypted. They should be thought of as
++ * filesystem metadata, not user data. New-style internal quota files
++ * cannot be encrypted by users anyway, but old-style external quota
++ * files could potentially be incorrectly created in an encrypted
++ * directory, hence this explicit check. Some reasons why encrypted
++ * quota files don't work include: (1) some filesystems that support
++ * encryption don't handle it in their quota_read and quota_write, and
++ * (2) cleaning up encrypted quota files at unmount would need special
++ * consideration, as quota files are cleaned up later than user files.
++ */
++ if (IS_ENCRYPTED(inode))
++ return -EINVAL;
++
+ dqopt->files[type] = igrab(inode);
+ if (!dqopt->files[type])
+ return -EIO;
+diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
+index fe1bf5b6e0cb3..59f6b8e32cc97 100644
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -32,7 +32,7 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
+ * fully cached or it may be in the process of
+ * being deleted due to a lease break.
+ */
+- if (!cfid->has_lease) {
++ if (!cfid->time || !cfid->has_lease) {
+ spin_unlock(&cfids->cfid_list_lock);
+ return NULL;
+ }
+@@ -193,10 +193,20 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ npath = path_no_prefix(cifs_sb, path);
+ if (IS_ERR(npath)) {
+ rc = PTR_ERR(npath);
+- kfree(utf16_path);
+- return rc;
++ goto out;
+ }
+
++ if (!npath[0]) {
++ dentry = dget(cifs_sb->root);
++ } else {
++ dentry = path_to_dentry(cifs_sb, npath);
++ if (IS_ERR(dentry)) {
++ rc = -ENOENT;
++ goto out;
++ }
++ }
++ cfid->dentry = dentry;
++
+ /*
+ * We do not hold the lock for the open because in case
+ * SMB2_open needs to reconnect.
+@@ -249,6 +259,15 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+
+ smb2_set_related(&rqst[1]);
+
++ /*
++ * Set @cfid->has_lease to true before sending out compounded request so
++ * its lease reference can be put in cached_dir_lease_break() due to a
++ * potential lease break right after the request is sent or while @cfid
++ * is still being cached. Concurrent processes won't be able to use it
++ * yet due to @cfid->time being zero.
++ */
++ cfid->has_lease = true;
++
+ rc = compound_send_recv(xid, ses, server,
+ flags, 2, rqst,
+ resp_buftype, rsp_iov);
+@@ -263,6 +282,8 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ cfid->tcon = tcon;
+ cfid->is_open = true;
+
++ spin_lock(&cfids->cfid_list_lock);
++
+ o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
+ oparms.fid->persistent_fid = o_rsp->PersistentFileId;
+ oparms.fid->volatile_fid = o_rsp->VolatileFileId;
+@@ -270,18 +291,25 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
+ #endif /* CIFS_DEBUG2 */
+
+- if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE)
++ rc = -EINVAL;
++ if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) {
++ spin_unlock(&cfids->cfid_list_lock);
+ goto oshr_free;
++ }
+
+ smb2_parse_contexts(server, o_rsp,
+ &oparms.fid->epoch,
+ oparms.fid->lease_key, &oplock,
+ NULL, NULL);
+- if (!(oplock & SMB2_LEASE_READ_CACHING_HE))
++ if (!(oplock & SMB2_LEASE_READ_CACHING_HE)) {
++ spin_unlock(&cfids->cfid_list_lock);
+ goto oshr_free;
++ }
+ qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
+- if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
++ if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) {
++ spin_unlock(&cfids->cfid_list_lock);
+ goto oshr_free;
++ }
+ if (!smb2_validate_and_copy_iov(
+ le16_to_cpu(qi_rsp->OutputBufferOffset),
+ sizeof(struct smb2_file_all_info),
+@@ -289,37 +317,24 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ (char *)&cfid->file_all_info))
+ cfid->file_all_info_is_valid = true;
+
+- if (!npath[0])
+- dentry = dget(cifs_sb->root);
+- else {
+- dentry = path_to_dentry(cifs_sb, npath);
+- if (IS_ERR(dentry)) {
+- rc = -ENOENT;
+- goto oshr_free;
+- }
+- }
+- spin_lock(&cfids->cfid_list_lock);
+- cfid->dentry = dentry;
+ cfid->time = jiffies;
+- cfid->has_lease = true;
+ spin_unlock(&cfids->cfid_list_lock);
++ /* At this point the directory handle is fully cached */
++ rc = 0;
+
+ oshr_free:
+- kfree(utf16_path);
+ SMB2_open_free(&rqst[0]);
+ SMB2_query_info_free(&rqst[1]);
+ free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+ free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+- spin_lock(&cfids->cfid_list_lock);
+- if (!cfid->has_lease) {
+- if (rc) {
+- if (cfid->on_list) {
+- list_del(&cfid->entry);
+- cfid->on_list = false;
+- cfids->num_entries--;
+- }
+- rc = -ENOENT;
+- } else {
++ if (rc) {
++ spin_lock(&cfids->cfid_list_lock);
++ if (cfid->on_list) {
++ list_del(&cfid->entry);
++ cfid->on_list = false;
++ cfids->num_entries--;
++ }
++ if (cfid->has_lease) {
+ /*
+ * We are guaranteed to have two references at this
+ * point. One for the caller and one for a potential
+@@ -327,25 +342,24 @@ oshr_free:
+ * will be closed when the caller closes the cached
+ * handle.
+ */
++ cfid->has_lease = false;
+ spin_unlock(&cfids->cfid_list_lock);
+ kref_put(&cfid->refcount, smb2_close_cached_fid);
+ goto out;
+ }
++ spin_unlock(&cfids->cfid_list_lock);
+ }
+- spin_unlock(&cfids->cfid_list_lock);
++out:
+ if (rc) {
+ if (cfid->is_open)
+ SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
+ cfid->fid.volatile_fid);
+ free_cached_dir(cfid);
+- cfid = NULL;
+- }
+-out:
+- if (rc == 0) {
++ } else {
+ *ret_cfid = cfid;
+ atomic_inc(&tcon->num_remote_opens);
+ }
+-
++ kfree(utf16_path);
+ return rc;
+ }
+
+diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
+index aec6e91374742..e59505eff75ca 100644
+--- a/fs/smb/client/cifs_debug.c
++++ b/fs/smb/client/cifs_debug.c
+@@ -452,6 +452,11 @@ skip_rdma:
+ seq_printf(m, "\n\n\tSessions: ");
+ i = 0;
+ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
++ spin_lock(&ses->ses_lock);
++ if (ses->ses_status == SES_EXITING) {
++ spin_unlock(&ses->ses_lock);
++ continue;
++ }
+ i++;
+ if ((ses->serverDomain == NULL) ||
+ (ses->serverOS == NULL) ||
+@@ -472,6 +477,7 @@ skip_rdma:
+ ses->ses_count, ses->serverOS, ses->serverNOS,
+ ses->capabilities, ses->ses_status);
+ }
++ spin_unlock(&ses->ses_lock);
+
+ seq_printf(m, "\n\tSecurity type: %s ",
+ get_security_type_str(server->ops->select_sectype(server, ses->sectype)));
+diff --git a/fs/smb/client/cifs_ioctl.h b/fs/smb/client/cifs_ioctl.h
+index 332588e77c311..26327442e383b 100644
+--- a/fs/smb/client/cifs_ioctl.h
++++ b/fs/smb/client/cifs_ioctl.h
+@@ -26,6 +26,11 @@ struct smb_mnt_fs_info {
+ __u64 cifs_posix_caps;
+ } __packed;
+
++struct smb_mnt_tcon_info {
++ __u32 tid;
++ __u64 session_id;
++} __packed;
++
+ struct smb_snapshot_array {
+ __u32 number_of_snapshots;
+ __u32 number_of_snapshots_returned;
+@@ -108,6 +113,7 @@ struct smb3_notify_info {
+ #define CIFS_IOC_NOTIFY _IOW(CIFS_IOCTL_MAGIC, 9, struct smb3_notify)
+ #define CIFS_DUMP_FULL_KEY _IOWR(CIFS_IOCTL_MAGIC, 10, struct smb3_full_key_debug_info)
+ #define CIFS_IOC_NOTIFY_INFO _IOWR(CIFS_IOCTL_MAGIC, 11, struct smb3_notify_info)
++#define CIFS_IOC_GET_TCON_INFO _IOR(CIFS_IOCTL_MAGIC, 12, struct smb_mnt_tcon_info)
+ #define CIFS_IOC_SHUTDOWN _IOR('X', 125, __u32)
+
+ /*
+diff --git a/fs/smb/client/cifs_spnego.c b/fs/smb/client/cifs_spnego.c
+index 6f3285f1dfee5..af7849e5974ff 100644
+--- a/fs/smb/client/cifs_spnego.c
++++ b/fs/smb/client/cifs_spnego.c
+@@ -64,8 +64,8 @@ struct key_type cifs_spnego_key_type = {
+ * strlen(";sec=ntlmsspi") */
+ #define MAX_MECH_STR_LEN 13
+
+-/* strlen of "host=" */
+-#define HOST_KEY_LEN 5
++/* strlen of ";host=" */
++#define HOST_KEY_LEN 6
+
+ /* strlen of ";ip4=" or ";ip6=" */
+ #define IP_KEY_LEN 5
+diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
+index e19df244ea7ea..3e2cdcaa9c1db 100644
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -1191,6 +1191,7 @@ const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
+
+ const struct inode_operations cifs_symlink_inode_ops = {
+ .get_link = cifs_get_link,
++ .setattr = cifs_setattr,
+ .permission = cifs_permission,
+ .listxattr = cifs_listxattr,
+ };
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index ac68fed5ad28a..64dce1081d007 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -2113,6 +2113,7 @@ static inline int cifs_get_num_sgs(const struct smb_rqst *rqst,
+ unsigned int len, skip;
+ unsigned int nents = 0;
+ unsigned long addr;
++ size_t data_size;
+ int i, j;
+
+ /*
+@@ -2128,17 +2129,21 @@ static inline int cifs_get_num_sgs(const struct smb_rqst *rqst,
+ * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
+ */
+ for (i = 0; i < num_rqst; i++) {
++ data_size = iov_iter_count(&rqst[i].rq_iter);
++
+ /* We really don't want a mixture of pinned and unpinned pages
+ * in the sglist. It's hard to keep track of which is what.
+ * Instead, we convert to a BVEC-type iterator higher up.
+ */
+- if (WARN_ON_ONCE(user_backed_iter(&rqst[i].rq_iter)))
++ if (data_size &&
++ WARN_ON_ONCE(user_backed_iter(&rqst[i].rq_iter)))
+ return -EIO;
+
+ /* We also don't want to have any extra refs or pins to clean
+ * up in the sglist.
+ */
+- if (WARN_ON_ONCE(iov_iter_extract_will_pin(&rqst[i].rq_iter)))
++ if (data_size &&
++ WARN_ON_ONCE(iov_iter_extract_will_pin(&rqst[i].rq_iter)))
+ return -EIO;
+
+ for (j = 0; j < rqst[i].rq_nvec; j++) {
+@@ -2154,7 +2159,8 @@ static inline int cifs_get_num_sgs(const struct smb_rqst *rqst,
+ }
+ skip = 0;
+ }
+- nents += iov_iter_npages(&rqst[i].rq_iter, INT_MAX);
++ if (data_size)
++ nents += iov_iter_npages(&rqst[i].rq_iter, INT_MAX);
+ }
+ nents += DIV_ROUND_UP(offset_in_page(sig) + SMB2_SIGNATURE_SIZE, PAGE_SIZE);
+ return nents;
+diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h
+index e17222fec9d29..a75220db5c1e1 100644
+--- a/fs/smb/client/cifspdu.h
++++ b/fs/smb/client/cifspdu.h
+@@ -2570,7 +2570,7 @@ typedef struct {
+
+
+ struct win_dev {
+- unsigned char type[8]; /* IntxCHR or IntxBLK */
++ unsigned char type[8]; /* IntxCHR or IntxBLK or LnxFIFO */
+ __le64 major;
+ __le64 minor;
+ } __attribute__((packed));
+diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
+index bd0a1505719a4..a01ee1b0a66ce 100644
+--- a/fs/smb/client/cifsproto.h
++++ b/fs/smb/client/cifsproto.h
+@@ -81,7 +81,7 @@ extern char *cifs_build_path_to_root(struct smb3_fs_context *ctx,
+ extern char *build_wildcard_path_from_dentry(struct dentry *direntry);
+ char *cifs_build_devname(char *nodename, const char *prepath);
+ extern void delete_mid(struct mid_q_entry *mid);
+-extern void release_mid(struct mid_q_entry *mid);
++void __release_mid(struct kref *refcount);
+ extern void cifs_wake_up_task(struct mid_q_entry *mid);
+ extern int cifs_handle_standard(struct TCP_Server_Info *server,
+ struct mid_q_entry *mid);
+@@ -741,4 +741,9 @@ static inline bool dfs_src_pathname_equal(const char *s1, const char *s2)
+ return true;
+ }
+
++static inline void release_mid(struct mid_q_entry *mid)
++{
++ kref_put(&mid->refcount, __release_mid);
++}
++
+ #endif /* _CIFSPROTO_H */
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index bd33661dcb57f..024f54a0be052 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -156,13 +156,14 @@ cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
+ /* If server is a channel, select the primary channel */
+ pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
+
+- spin_lock(&pserver->srv_lock);
++ /* if we need to signal just this channel */
+ if (!all_channels) {
+- pserver->tcpStatus = CifsNeedReconnect;
+- spin_unlock(&pserver->srv_lock);
++ spin_lock(&server->srv_lock);
++ if (server->tcpStatus != CifsExiting)
++ server->tcpStatus = CifsNeedReconnect;
++ spin_unlock(&server->srv_lock);
+ return;
+ }
+- spin_unlock(&pserver->srv_lock);
+
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
+@@ -3849,8 +3850,12 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
+ is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
+ spin_unlock(&ses->chan_lock);
+
+- if (!is_binding)
++ if (!is_binding) {
+ ses->ses_status = SES_IN_SETUP;
++
++ /* force iface_list refresh */
++ ses->iface_last_update = 0;
++ }
+ spin_unlock(&ses->ses_lock);
+
+ /* update ses ip_addr only for primary chan */
+diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
+index cb85d7977b1e3..9e242b0f48fcd 100644
+--- a/fs/smb/client/inode.c
++++ b/fs/smb/client/inode.c
+@@ -567,6 +567,10 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
+ cifs_dbg(FYI, "Symlink\n");
+ fattr->cf_mode |= S_IFLNK;
+ fattr->cf_dtype = DT_LNK;
++ } else if (memcmp("LnxFIFO", pbuf, 8) == 0) {
++ cifs_dbg(FYI, "FIFO\n");
++ fattr->cf_mode |= S_IFIFO;
++ fattr->cf_dtype = DT_FIFO;
+ } else {
+ fattr->cf_mode |= S_IFREG; /* file? */
+ fattr->cf_dtype = DT_REG;
+diff --git a/fs/smb/client/ioctl.c b/fs/smb/client/ioctl.c
+index f7160003e0ed9..73ededa8eba5c 100644
+--- a/fs/smb/client/ioctl.c
++++ b/fs/smb/client/ioctl.c
+@@ -117,6 +117,20 @@ out_drop_write:
+ return rc;
+ }
+
++static long smb_mnt_get_tcon_info(struct cifs_tcon *tcon, void __user *arg)
++{
++ int rc = 0;
++ struct smb_mnt_tcon_info tcon_inf;
++
++ tcon_inf.tid = tcon->tid;
++ tcon_inf.session_id = tcon->ses->Suid;
++
++ if (copy_to_user(arg, &tcon_inf, sizeof(struct smb_mnt_tcon_info)))
++ rc = -EFAULT;
++
++ return rc;
++}
++
+ static long smb_mnt_get_fsinfo(unsigned int xid, struct cifs_tcon *tcon,
+ void __user *arg)
+ {
+@@ -414,6 +428,17 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
+ tcon = tlink_tcon(pSMBFile->tlink);
+ rc = smb_mnt_get_fsinfo(xid, tcon, (void __user *)arg);
+ break;
++ case CIFS_IOC_GET_TCON_INFO:
++ cifs_sb = CIFS_SB(inode->i_sb);
++ tlink = cifs_sb_tlink(cifs_sb);
++ if (IS_ERR(tlink)) {
++ rc = PTR_ERR(tlink);
++ break;
++ }
++ tcon = tlink_tcon(tlink);
++ rc = smb_mnt_get_tcon_info(tcon, (void __user *)arg);
++ cifs_put_tlink(tlink);
++ break;
+ case CIFS_ENUMERATE_SNAPSHOTS:
+ if (pSMBFile == NULL)
+ break;
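
The new CIFS_IOC_GET_TCON_INFO ioctl is issued from userspace against any file or directory on the mount. A hypothetical usage sketch (the struct layout and ioctl number mirror the cifs_ioctl.h hunk above; the CIFS_IOCTL_MAGIC value and the path are assumptions for illustration):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>

struct smb_mnt_tcon_info {
        uint32_t tid;
        uint64_t session_id;
} __attribute__((packed));

#define CIFS_IOCTL_MAGIC 0xCF /* assumed; see fs/smb/client/cifs_ioctl.h */
#define CIFS_IOC_GET_TCON_INFO _IOR(CIFS_IOCTL_MAGIC, 12, struct smb_mnt_tcon_info)

int main(void)
{
        struct smb_mnt_tcon_info info;
        int fd = open("/mnt/cifs", O_RDONLY);

        if (fd >= 0 && ioctl(fd, CIFS_IOC_GET_TCON_INFO, &info) == 0)
                printf("tid=%u session=0x%llx\n", info.tid,
                       (unsigned long long)info.session_id);
        return 0;
}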
+diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
+index c57ca2050b73f..1e9a49cb5696b 100644
+--- a/fs/smb/client/sess.c
++++ b/fs/smb/client/sess.c
+@@ -186,7 +186,6 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
+ }
+
+ if (!(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
+- ses->chan_max = 1;
+ spin_unlock(&ses->chan_lock);
+ cifs_server_dbg(VFS, "no multichannel support\n");
+ return 0;
+diff --git a/fs/smb/client/smb2misc.c b/fs/smb/client/smb2misc.c
+index 3935a60db5c31..446647df79dc3 100644
+--- a/fs/smb/client/smb2misc.c
++++ b/fs/smb/client/smb2misc.c
+@@ -787,7 +787,7 @@ __smb2_handle_cancelled_cmd(struct cifs_tcon *tcon, __u16 cmd, __u64 mid,
+ {
+ struct close_cancelled_open *cancelled;
+
+- cancelled = kzalloc(sizeof(*cancelled), GFP_ATOMIC);
++ cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
+ if (!cancelled)
+ return -ENOMEM;
+
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index a5cba71c30aed..0b71a1cc22980 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -5212,7 +5212,7 @@ smb2_make_node(unsigned int xid, struct inode *inode,
+ * over SMB2/SMB3 and Samba will do this with SMB3.1.1 POSIX Extensions
+ */
+
+- if (!S_ISCHR(mode) && !S_ISBLK(mode))
++ if (!S_ISCHR(mode) && !S_ISBLK(mode) && !S_ISFIFO(mode))
+ return rc;
+
+ cifs_dbg(FYI, "sfu compat create special file\n");
+@@ -5260,6 +5260,12 @@ smb2_make_node(unsigned int xid, struct inode *inode,
+ pdev->minor = cpu_to_le64(MINOR(dev));
+ rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
+ &bytes_written, iov, 1);
++ } else if (S_ISFIFO(mode)) {
++ memcpy(pdev->type, "LnxFIFO", 8);
++ pdev->major = 0;
++ pdev->minor = 0;
++ rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
++ &bytes_written, iov, 1);
+ }
+ tcon->ses->server->ops->close(xid, tcon, &fid);
+ d_drop(dentry);
+diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c
+index 7676091b3e77a..21fc6d84e396d 100644
+--- a/fs/smb/client/smb2transport.c
++++ b/fs/smb/client/smb2transport.c
+@@ -452,6 +452,8 @@ generate_smb3signingkey(struct cifs_ses *ses,
+ ptriplet->encryption.context,
+ ses->smb3encryptionkey,
+ SMB3_ENC_DEC_KEY_SIZE);
++ if (rc)
++ return rc;
+ rc = generate_key(ses, ptriplet->decryption.label,
+ ptriplet->decryption.context,
+ ses->smb3decryptionkey,
+@@ -460,9 +462,6 @@ generate_smb3signingkey(struct cifs_ses *ses,
+ return rc;
+ }
+
+- if (rc)
+- return rc;
+-
+ #ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS
+ cifs_dbg(VFS, "%s: dumping generated AES session keys\n", __func__);
+ /*
+diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
+index 2b9a2ed45a652..a48591d79b667 100644
+--- a/fs/smb/client/transport.c
++++ b/fs/smb/client/transport.c
+@@ -76,7 +76,7 @@ alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
+ return temp;
+ }
+
+-static void __release_mid(struct kref *refcount)
++void __release_mid(struct kref *refcount)
+ {
+ struct mid_q_entry *midEntry =
+ container_of(refcount, struct mid_q_entry, refcount);
+@@ -156,15 +156,6 @@ static void __release_mid(struct kref *refcount)
+ mempool_free(midEntry, cifs_mid_poolp);
+ }
+
+-void release_mid(struct mid_q_entry *mid)
+-{
+- struct TCP_Server_Info *server = mid->server;
+-
+- spin_lock(&server->mid_lock);
+- kref_put(&mid->refcount, __release_mid);
+- spin_unlock(&server->mid_lock);
+-}
+-
+ void
+ delete_mid(struct mid_q_entry *mid)
+ {
+diff --git a/fs/smb/client/xattr.c b/fs/smb/client/xattr.c
+index 4ad5531686d81..c2bf829310bee 100644
+--- a/fs/smb/client/xattr.c
++++ b/fs/smb/client/xattr.c
+@@ -150,10 +150,13 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+ goto out;
+
+- if (pTcon->ses->server->ops->set_EA)
++ if (pTcon->ses->server->ops->set_EA) {
+ rc = pTcon->ses->server->ops->set_EA(xid, pTcon,
+ full_path, name, value, (__u16)size,
+ cifs_sb->local_nls, cifs_sb);
++ if (rc == 0)
++ inode_set_ctime_current(inode);
++ }
+ break;
+
+ case XATTR_CIFS_ACL:
+diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c
+index c2b75d8988528..6cd9d117efaa2 100644
+--- a/fs/smb/server/smb_common.c
++++ b/fs/smb/server/smb_common.c
+@@ -372,11 +372,22 @@ static int smb1_allocate_rsp_buf(struct ksmbd_work *work)
+ return 0;
+ }
+
++/**
++ * set_smb1_rsp_status() - mark an smb1 request as failed
++ * @work: smb work containing the response header
++ * @err: error code (unused; the server simply suppresses the response)
++ */
++static void set_smb1_rsp_status(struct ksmbd_work *work, __le32 err)
++{
++ work->send_no_response = 1;
++}
++
+ static struct smb_version_ops smb1_server_ops = {
+ .get_cmd_val = get_smb1_cmd_val,
+ .init_rsp_hdr = init_smb1_rsp_hdr,
+ .allocate_rsp_buf = smb1_allocate_rsp_buf,
+ .check_user_session = smb1_check_user_session,
++ .set_rsp_status = set_smb1_rsp_status,
+ };
+
+ static int smb1_negotiate(struct ksmbd_work *work)
+diff --git a/fs/smb/server/smbacl.c b/fs/smb/server/smbacl.c
+index e5e438bf54996..c9de2b4684848 100644
+--- a/fs/smb/server/smbacl.c
++++ b/fs/smb/server/smbacl.c
+@@ -1107,6 +1107,7 @@ pass:
+ struct smb_acl *pdacl;
+ struct smb_sid *powner_sid = NULL, *pgroup_sid = NULL;
+ int powner_sid_size = 0, pgroup_sid_size = 0, pntsd_size;
++ int pntsd_alloc_size;
+
+ if (parent_pntsd->osidoffset) {
+ powner_sid = (struct smb_sid *)((char *)parent_pntsd +
+@@ -1119,9 +1120,10 @@ pass:
+ pgroup_sid_size = 1 + 1 + 6 + (pgroup_sid->num_subauth * 4);
+ }
+
+- pntsd = kzalloc(sizeof(struct smb_ntsd) + powner_sid_size +
+- pgroup_sid_size + sizeof(struct smb_acl) +
+- nt_size, GFP_KERNEL);
++ pntsd_alloc_size = sizeof(struct smb_ntsd) + powner_sid_size +
++ pgroup_sid_size + sizeof(struct smb_acl) + nt_size;
++
++ pntsd = kzalloc(pntsd_alloc_size, GFP_KERNEL);
+ if (!pntsd) {
+ rc = -ENOMEM;
+ goto free_aces_base;
+@@ -1136,6 +1138,27 @@ pass:
+ pntsd->gsidoffset = parent_pntsd->gsidoffset;
+ pntsd->dacloffset = parent_pntsd->dacloffset;
+
++ if ((u64)le32_to_cpu(pntsd->osidoffset) + powner_sid_size >
++ pntsd_alloc_size) {
++ rc = -EINVAL;
++ kfree(pntsd);
++ goto free_aces_base;
++ }
++
++ if ((u64)le32_to_cpu(pntsd->gsidoffset) + pgroup_sid_size >
++ pntsd_alloc_size) {
++ rc = -EINVAL;
++ kfree(pntsd);
++ goto free_aces_base;
++ }
++
++ if ((u64)le32_to_cpu(pntsd->dacloffset) + sizeof(struct smb_acl) + nt_size >
++ pntsd_alloc_size) {
++ rc = -EINVAL;
++ kfree(pntsd);
++ goto free_aces_base;
++ }
++
+ if (pntsd->osidoffset) {
+ struct smb_sid *owner_sid = (struct smb_sid *)((char *)pntsd +
+ le32_to_cpu(pntsd->osidoffset));
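
All three added checks follow the same untrusted-offset idiom: widen the 32-bit on-wire offset to u64 before adding a length, so the bounds comparison itself cannot wrap. In isolation (names are illustrative):

static bool sketch_region_fits(u32 wire_offset, size_t len, size_t buf_size)
{
        /* 64-bit arithmetic cannot wrap for a 32-bit offset plus the
         * modest lengths used here. */
        return (u64)wire_offset + len <= buf_size;
}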
+diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
+index 3d5d652153a5b..1c9bfc0d67777 100644
+--- a/fs/smb/server/vfs.c
++++ b/fs/smb/server/vfs.c
+@@ -173,10 +173,6 @@ int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode)
+ return err;
+ }
+
+- err = mnt_want_write(path.mnt);
+- if (err)
+- goto out_err;
+-
+ mode |= S_IFREG;
+ err = vfs_create(mnt_idmap(path.mnt), d_inode(path.dentry),
+ dentry, mode, true);
+@@ -186,9 +182,7 @@ int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode)
+ } else {
+ pr_err("File(%s): creation failed (err:%d)\n", name, err);
+ }
+- mnt_drop_write(path.mnt);
+
+-out_err:
+ done_path_create(&path, dentry);
+ return err;
+ }
+@@ -219,10 +213,6 @@ int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode)
+ return err;
+ }
+
+- err = mnt_want_write(path.mnt);
+- if (err)
+- goto out_err2;
+-
+ idmap = mnt_idmap(path.mnt);
+ mode |= S_IFDIR;
+ err = vfs_mkdir(idmap, d_inode(path.dentry), dentry, mode);
+@@ -233,21 +223,19 @@ int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode)
+ dentry->d_name.len);
+ if (IS_ERR(d)) {
+ err = PTR_ERR(d);
+- goto out_err1;
++ goto out_err;
+ }
+ if (unlikely(d_is_negative(d))) {
+ dput(d);
+ err = -ENOENT;
+- goto out_err1;
++ goto out_err;
+ }
+
+ ksmbd_vfs_inherit_owner(work, d_inode(path.dentry), d_inode(d));
+ dput(d);
+ }
+
+-out_err1:
+- mnt_drop_write(path.mnt);
+-out_err2:
++out_err:
+ done_path_create(&path, dentry);
+ if (err)
+ pr_err("mkdir(%s): creation failed (err:%d)\n", name, err);
+@@ -665,16 +653,11 @@ int ksmbd_vfs_link(struct ksmbd_work *work, const char *oldname,
+ goto out3;
+ }
+
+- err = mnt_want_write(newpath.mnt);
+- if (err)
+- goto out3;
+-
+ err = vfs_link(oldpath.dentry, mnt_idmap(newpath.mnt),
+ d_inode(newpath.dentry),
+ dentry, NULL);
+ if (err)
+ ksmbd_debug(VFS, "vfs_link failed err %d\n", err);
+- mnt_drop_write(newpath.mnt);
+
+ out3:
+ done_path_create(&newpath, dentry);
+diff --git a/fs/xfs/xfs_inode_item_recover.c b/fs/xfs/xfs_inode_item_recover.c
+index 0e5dba2343ea1..e6609067ef261 100644
+--- a/fs/xfs/xfs_inode_item_recover.c
++++ b/fs/xfs/xfs_inode_item_recover.c
+@@ -369,24 +369,26 @@ xlog_recover_inode_commit_pass2(
+ * superblock flag to determine whether we need to look at di_flushiter
+ * to skip replay when the on disk inode is newer than the log one
+ */
+- if (!xfs_has_v3inodes(mp) &&
+- ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
+- /*
+- * Deal with the wrap case, DI_MAX_FLUSH is less
+- * than smaller numbers
+- */
+- if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
+- ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) {
+- /* do nothing */
+- } else {
+- trace_xfs_log_recover_inode_skip(log, in_f);
+- error = 0;
+- goto out_release;
++ if (!xfs_has_v3inodes(mp)) {
++ if (ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
++ /*
++ * Deal with the wrap case, DI_MAX_FLUSH is less
++ * than smaller numbers
++ */
++ if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
++ ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) {
++ /* do nothing */
++ } else {
++ trace_xfs_log_recover_inode_skip(log, in_f);
++ error = 0;
++ goto out_release;
++ }
+ }
++
++ /* Take the opportunity to reset the flush iteration count */
++ ldip->di_flushiter = 0;
+ }
+
+- /* Take the opportunity to reset the flush iteration count */
+- ldip->di_flushiter = 0;
+
+ if (unlikely(S_ISREG(ldip->di_mode))) {
+ if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
+diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
+index 3c8bba9f1114a..be1dd4c1a9174 100644
+--- a/include/acpi/ghes.h
++++ b/include/acpi/ghes.h
+@@ -73,8 +73,12 @@ int ghes_register_vendor_record_notifier(struct notifier_block *nb);
+ void ghes_unregister_vendor_record_notifier(struct notifier_block *nb);
+
+ struct list_head *ghes_get_devices(void);
++
++void ghes_estatus_pool_region_free(unsigned long addr, u32 size);
+ #else
+ static inline struct list_head *ghes_get_devices(void) { return NULL; }
++
++static inline void ghes_estatus_pool_region_free(unsigned long addr, u32 size) { return; }
+ #endif
+
+ int ghes_estatus_pool_init(unsigned int num_ghes);
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 98a7d6fd10360..a8b775e9d4d1a 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -890,10 +890,14 @@ bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
+ aux->ctx_field_size = size;
+ }
+
++static bool bpf_is_ldimm64(const struct bpf_insn *insn)
++{
++ return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
++}
++
+ static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
+ {
+- return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
+- insn->src_reg == BPF_PSEUDO_FUNC;
++ return bpf_is_ldimm64(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
+ }
+
+ struct bpf_prog_ops {
+diff --git a/include/linux/damon.h b/include/linux/damon.h
+index d5d4d19928e0a..4ba68cc635e2e 100644
+--- a/include/linux/damon.h
++++ b/include/linux/damon.h
+@@ -626,6 +626,13 @@ static inline bool damon_target_has_pid(const struct damon_ctx *ctx)
+ return ctx->ops.id == DAMON_OPS_VADDR || ctx->ops.id == DAMON_OPS_FVADDR;
+ }
+
++static inline unsigned int damon_max_nr_accesses(const struct damon_attrs *attrs)
++{
++ /* {aggr,sample}_interval are unsigned long, hence could overflow */
++ return min(attrs->aggr_interval / attrs->sample_interval,
++ (unsigned long)UINT_MAX);
++}
++
+
+ int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive);
+ int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
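
The helper clamps because both intervals are unsigned long: on 64-bit kernels the quotient can exceed UINT_MAX and would otherwise be silently truncated when stored in an unsigned int. A worked illustration with made-up values:

unsigned long aggr_interval = 5000000000UL;        /* usecs */
unsigned long sample_interval = 1;                 /* usecs */
unsigned long q = aggr_interval / sample_interval; /* 5000000000 */
unsigned int truncated = q;                        /* wraps to 705032704 */
unsigned int clamped = min(q, (unsigned long)UINT_MAX); /* 4294967295 */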
+diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
+index 62b61527bcc4f..1b523fd48586f 100644
+--- a/include/linux/ethtool.h
++++ b/include/linux/ethtool.h
+@@ -1045,10 +1045,10 @@ static inline int ethtool_mm_frag_size_min_to_add(u32 val_min, u32 *val_add,
+
+ /**
+ * ethtool_sprintf - Write formatted string to ethtool string data
+- * @data: Pointer to start of string to update
++ * @data: Pointer to a pointer to the start of string to update
+ * @fmt: Format of string to write
+ *
+- * Write formatted string to data. Update data to point at start of
++ * Write formatted string to *data. Update *data to point at start of
+ * next string.
+ */
+ extern __printf(2, 3) void ethtool_sprintf(u8 **data, const char *fmt, ...);
+diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
+index a82a4bb6ce68b..cf1adceb02697 100644
+--- a/include/linux/f2fs_fs.h
++++ b/include/linux/f2fs_fs.h
+@@ -104,6 +104,7 @@ enum f2fs_error {
+ ERROR_CORRUPTED_VERITY_XATTR,
+ ERROR_CORRUPTED_XATTR,
+ ERROR_INVALID_NODE_REFERENCE,
++ ERROR_INCONSISTENT_NAT,
+ ERROR_MAX,
+ };
+
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 87a21a18d114a..88cdfd90f0b2b 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1474,7 +1474,50 @@ static inline bool fsuidgid_has_mapping(struct super_block *sb,
+ kgid_has_mapping(fs_userns, kgid);
+ }
+
+-extern struct timespec64 current_time(struct inode *inode);
++struct timespec64 current_time(struct inode *inode);
++struct timespec64 inode_set_ctime_current(struct inode *inode);
++
++/**
++ * inode_get_ctime - fetch the current ctime from the inode
++ * @inode: inode from which to fetch ctime
++ *
++ * Grab the current ctime from the inode and return it.
++ */
++static inline struct timespec64 inode_get_ctime(const struct inode *inode)
++{
++ return inode->i_ctime;
++}
++
++/**
++ * inode_set_ctime_to_ts - set the ctime in the inode
++ * @inode: inode in which to set the ctime
++ * @ts: value to set in the ctime field
++ *
++ * Set the ctime in @inode to @ts
++ */
++static inline struct timespec64 inode_set_ctime_to_ts(struct inode *inode,
++ struct timespec64 ts)
++{
++ inode->i_ctime = ts;
++ return ts;
++}
++
++/**
++ * inode_set_ctime - set the ctime in the inode
++ * @inode: inode in which to set the ctime
++ * @sec: tv_sec value to set
++ * @nsec: tv_nsec value to set
++ *
++ * Set the ctime in @inode to { @sec, @nsec }
++ */
++static inline struct timespec64 inode_set_ctime(struct inode *inode,
++ time64_t sec, long nsec)
++{
++ struct timespec64 ts = { .tv_sec = sec,
++ .tv_nsec = nsec };
++
++ return inode_set_ctime_to_ts(inode, ts);
++}
+
+ /*
+ * Snapshotting support.
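
Together with inode_set_ctime_current() added in fs/inode.c above, these accessors give filesystems a single entry point for ctime updates. A minimal usage sketch (the helper name is hypothetical):

static void sketch_touch_ctime(struct inode *inode)
{
        /* Replaces the open-coded: inode->i_ctime = current_time(inode); */
        struct timespec64 now = inode_set_ctime_current(inode);

        /* Callers that also track mtime can reuse the returned value. */
        inode->i_mtime = now;
        mark_inode_dirty(inode);
}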
+diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h
+index 107613f7d7920..f6cd0f909d9fb 100644
+--- a/include/linux/generic-radix-tree.h
++++ b/include/linux/generic-radix-tree.h
+@@ -38,6 +38,7 @@
+
+ #include <asm/page.h>
+ #include <linux/bug.h>
++#include <linux/limits.h>
+ #include <linux/log2.h>
+ #include <linux/math.h>
+ #include <linux/types.h>
+@@ -184,6 +185,12 @@ void *__genradix_iter_peek(struct genradix_iter *, struct __genradix *, size_t);
+ static inline void __genradix_iter_advance(struct genradix_iter *iter,
+ size_t obj_size)
+ {
++ if (iter->offset + obj_size < iter->offset) {
++ iter->offset = SIZE_MAX;
++ iter->pos = SIZE_MAX;
++ return;
++ }
++
+ iter->offset += obj_size;
+
+ if (!is_power_of_2(obj_size) &&
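
The open-coded (a + b < a) test is the standard unsigned wrap check. The same guard could be expressed with the kernel's overflow helper; a sketch of the equivalent (the header open-codes it, presumably to keep its include footprint small):

size_t next;

if (check_add_overflow(iter->offset, obj_size, &next)) {
        /* Saturate so callers see a terminal iterator position. */
        iter->offset = SIZE_MAX;
        iter->pos = SIZE_MAX;
        return;
}
iter->offset = next;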
+diff --git a/include/linux/irq.h b/include/linux/irq.h
+index d8a6fdce93738..90081afa10ce5 100644
+--- a/include/linux/irq.h
++++ b/include/linux/irq.h
+@@ -215,8 +215,6 @@ struct irq_data {
+ * IRQD_SINGLE_TARGET - IRQ allows only a single affinity target
+ * IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set
+ * IRQD_CAN_RESERVE - Can use reservation mode
+- * IRQD_MSI_NOMASK_QUIRK - Non-maskable MSI quirk for affinity change
+- * required
+ * IRQD_HANDLE_ENFORCE_IRQCTX - Enforce that handle_irq_*() is only invoked
+ * from actual interrupt context.
+ * IRQD_AFFINITY_ON_ACTIVATE - Affinity is set on activation. Don't call
+@@ -247,11 +245,10 @@ enum {
+ IRQD_SINGLE_TARGET = BIT(24),
+ IRQD_DEFAULT_TRIGGER_SET = BIT(25),
+ IRQD_CAN_RESERVE = BIT(26),
+- IRQD_MSI_NOMASK_QUIRK = BIT(27),
+- IRQD_HANDLE_ENFORCE_IRQCTX = BIT(28),
+- IRQD_AFFINITY_ON_ACTIVATE = BIT(29),
+- IRQD_IRQ_ENABLED_ON_SUSPEND = BIT(30),
+- IRQD_RESEND_WHEN_IN_PROGRESS = BIT(31),
++ IRQD_HANDLE_ENFORCE_IRQCTX = BIT(27),
++ IRQD_AFFINITY_ON_ACTIVATE = BIT(28),
++ IRQD_IRQ_ENABLED_ON_SUSPEND = BIT(29),
++ IRQD_RESEND_WHEN_IN_PROGRESS = BIT(30),
+ };
+
+ #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
+@@ -426,21 +423,6 @@ static inline bool irqd_can_reserve(struct irq_data *d)
+ return __irqd_to_state(d) & IRQD_CAN_RESERVE;
+ }
+
+-static inline void irqd_set_msi_nomask_quirk(struct irq_data *d)
+-{
+- __irqd_to_state(d) |= IRQD_MSI_NOMASK_QUIRK;
+-}
+-
+-static inline void irqd_clr_msi_nomask_quirk(struct irq_data *d)
+-{
+- __irqd_to_state(d) &= ~IRQD_MSI_NOMASK_QUIRK;
+-}
+-
+-static inline bool irqd_msi_nomask_quirk(struct irq_data *d)
+-{
+- return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK;
+-}
+-
+ static inline void irqd_set_affinity_on_activate(struct irq_data *d)
+ {
+ __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE;
+diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
+index af796986baee6..1d71a043460cf 100644
+--- a/include/linux/lsm_hook_defs.h
++++ b/include/linux/lsm_hook_defs.h
+@@ -48,7 +48,7 @@ LSM_HOOK(int, 0, quota_on, struct dentry *dentry)
+ LSM_HOOK(int, 0, syslog, int type)
+ LSM_HOOK(int, 0, settime, const struct timespec64 *ts,
+ const struct timezone *tz)
+-LSM_HOOK(int, 0, vm_enough_memory, struct mm_struct *mm, long pages)
++LSM_HOOK(int, 1, vm_enough_memory, struct mm_struct *mm, long pages)
+ LSM_HOOK(int, 0, bprm_creds_for_exec, struct linux_binprm *bprm)
+ LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, struct file *file)
+ LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm)
+@@ -273,7 +273,7 @@ LSM_HOOK(void, LSM_RET_VOID, release_secctx, char *secdata, u32 seclen)
+ LSM_HOOK(void, LSM_RET_VOID, inode_invalidate_secctx, struct inode *inode)
+ LSM_HOOK(int, 0, inode_notifysecctx, struct inode *inode, void *ctx, u32 ctxlen)
+ LSM_HOOK(int, 0, inode_setsecctx, struct dentry *dentry, void *ctx, u32 ctxlen)
+-LSM_HOOK(int, 0, inode_getsecctx, struct inode *inode, void **ctx,
++LSM_HOOK(int, -EOPNOTSUPP, inode_getsecctx, struct inode *inode, void **ctx,
+ u32 *ctxlen)
+
+ #if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE)
+diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
+index daa2f40d9ce65..7b12eebc5586d 100644
+--- a/include/linux/mmc/card.h
++++ b/include/linux/mmc/card.h
+@@ -295,7 +295,9 @@ struct mmc_card {
+ #define MMC_QUIRK_BROKEN_HPI (1<<13) /* Disable broken HPI support */
+ #define MMC_QUIRK_BROKEN_SD_DISCARD (1<<14) /* Disable broken SD discard support */
+ #define MMC_QUIRK_BROKEN_SD_CACHE (1<<15) /* Disable broken SD cache support */
++#define MMC_QUIRK_BROKEN_CACHE_FLUSH (1<<16) /* Don't flush cache until the write has occurred */
+
++ bool written_flag; /* Indicates eMMC has been written since power on */
+ bool reenable_cmdq; /* Re-enable Command Queue */
+
+ unsigned int erase_size; /* erase size in sectors */
+diff --git a/include/linux/msi.h b/include/linux/msi.h
+index a50ea79522f85..ddace8c34dcf9 100644
+--- a/include/linux/msi.h
++++ b/include/linux/msi.h
+@@ -547,12 +547,6 @@ enum {
+ MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS = (1 << 5),
+ /* Free MSI descriptors */
+ MSI_FLAG_FREE_MSI_DESCS = (1 << 6),
+- /*
+- * Quirk to handle MSI implementations which do not provide
+- * masking. Currently known to affect x86, but has to be partially
+- * handled in the core MSI code.
+- */
+- MSI_FLAG_NOMASK_QUIRK = (1 << 7),
+
+ /* Mask for the generic functionality */
+ MSI_GENERIC_FLAGS_MASK = GENMASK(15, 0),
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 7702f078ef4ad..54bc1ca7b66fc 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -180,6 +180,8 @@
+ #define PCI_DEVICE_ID_BERKOM_A4T 0xffa4
+ #define PCI_DEVICE_ID_BERKOM_SCITEL_QUADRO 0xffa8
+
++#define PCI_VENDOR_ID_ITTIM 0x0b48
++
+ #define PCI_VENDOR_ID_COMPAQ 0x0e11
+ #define PCI_DEVICE_ID_COMPAQ_TOKENRING 0x0508
+ #define PCI_DEVICE_ID_COMPAQ_TACHYON 0xa0fc
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index e7afd0dd8a3d1..ff29da8e35f6c 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -843,11 +843,11 @@ struct perf_event {
+ };
+
+ /*
+- * ,-----------------------[1:n]----------------------.
+- * V V
+- * perf_event_context <-[1:n]-> perf_event_pmu_context <--- perf_event
+- * ^ ^ | |
+- * `--------[1:n]---------' `-[n:1]-> pmu <-[1:n]-'
++ * ,-----------------------[1:n]------------------------.
++ * V V
++ * perf_event_context <-[1:n]-> perf_event_pmu_context <-[1:n]- perf_event
++ * | |
++ * `--[n:1]-> pmu <-[1:n]--'
+ *
+ *
+ * struct perf_event_pmu_context lifetime is refcount based and RCU freed
+@@ -865,6 +865,9 @@ struct perf_event {
+ * ctx->mutex pinning the configuration. Since we hold a reference on
+ * group_leader (through the filedesc) it can't go away, therefore its
+ * associated pmu_ctx must exist and cannot change due to ctx->mutex.
++ *
++ * perf_event holds a refcount on perf_event_context
++ * perf_event holds a refcount on perf_event_pmu_context
+ */
+ struct perf_event_pmu_context {
+ struct pmu *pmu;
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index 1424670df161d..9aa6358a1a16b 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -99,14 +99,21 @@ static __always_inline unsigned char interrupt_context_level(void)
+ return level;
+ }
+
++/*
++ * These macro definitions avoid redundant invocations of preempt_count()
++ * because such invocations would result in redundant loads given that
++ * preempt_count() is commonly implemented with READ_ONCE().
++ */
++
+ #define nmi_count() (preempt_count() & NMI_MASK)
+ #define hardirq_count() (preempt_count() & HARDIRQ_MASK)
+ #ifdef CONFIG_PREEMPT_RT
+ # define softirq_count() (current->softirq_disable_cnt & SOFTIRQ_MASK)
++# define irq_count() ((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | softirq_count())
+ #else
+ # define softirq_count() (preempt_count() & SOFTIRQ_MASK)
++# define irq_count() (preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_MASK))
+ #endif
+-#define irq_count() (nmi_count() | hardirq_count() | softirq_count())
+
+ /*
+ * Macros to retrieve the current execution context:
+@@ -119,7 +126,11 @@ static __always_inline unsigned char interrupt_context_level(void)
+ #define in_nmi() (nmi_count())
+ #define in_hardirq() (hardirq_count())
+ #define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
+-#define in_task() (!(in_nmi() | in_hardirq() | in_serving_softirq()))
++#ifdef CONFIG_PREEMPT_RT
++# define in_task() (!((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | in_serving_softirq()))
++#else
++# define in_task() (!(preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
++#endif
+
+ /*
+ * The following macros are deprecated and should not be used in new code:
+diff --git a/include/linux/pwm.h b/include/linux/pwm.h
+index 04ae1d9073a74..0755ba9938f74 100644
+--- a/include/linux/pwm.h
++++ b/include/linux/pwm.h
+@@ -41,8 +41,8 @@ struct pwm_args {
+ };
+
+ enum {
+- PWMF_REQUESTED = 1 << 0,
+- PWMF_EXPORTED = 1 << 1,
++ PWMF_REQUESTED = 0,
++ PWMF_EXPORTED = 1,
+ };
+
+ /*
+diff --git a/include/linux/socket.h b/include/linux/socket.h
+index 39b74d83c7c4a..cfcb7e2c3813f 100644
+--- a/include/linux/socket.h
++++ b/include/linux/socket.h
+@@ -383,6 +383,7 @@ struct ucred {
+ #define SOL_MPTCP 284
+ #define SOL_MCTP 285
+ #define SOL_SMC 286
++#define SOL_VSOCK 287
+
+ /* IPX options */
+ #define IPX_TYPE 1
+diff --git a/include/linux/string.h b/include/linux/string.h
+index 9e3cb6923b0ef..5077776e995e0 100644
+--- a/include/linux/string.h
++++ b/include/linux/string.h
+@@ -5,7 +5,9 @@
+ #include <linux/compiler.h> /* for inline */
+ #include <linux/types.h> /* for size_t */
+ #include <linux/stddef.h> /* for NULL */
++#include <linux/err.h> /* for ERR_PTR() */
+ #include <linux/errno.h> /* for E2BIG */
++#include <linux/overflow.h> /* for check_mul_overflow() */
+ #include <linux/stdarg.h>
+ #include <uapi/linux/string.h>
+
+@@ -14,6 +16,44 @@ extern void *memdup_user(const void __user *, size_t);
+ extern void *vmemdup_user(const void __user *, size_t);
+ extern void *memdup_user_nul(const void __user *, size_t);
+
++/**
++ * memdup_array_user - duplicate array from user space
++ * @src: source address in user space
++ * @n: number of array members to copy
++ * @size: size of one array member
++ *
++ * Return: an ERR_PTR() on failure. Result is physically
++ * contiguous, to be freed by kfree().
++ */
++static inline void *memdup_array_user(const void __user *src, size_t n, size_t size)
++{
++ size_t nbytes;
++
++ if (check_mul_overflow(n, size, &nbytes))
++ return ERR_PTR(-EOVERFLOW);
++
++ return memdup_user(src, nbytes);
++}
++
++/**
++ * vmemdup_array_user - duplicate array from user space
++ * @src: source address in user space
++ * @n: number of array members to copy
++ * @size: size of one array member
++ *
++ * Return: an ERR_PTR() on failure. The result may not be
++ * physically contiguous. Use kvfree() to free.
++ */
++static inline void *vmemdup_array_user(const void __user *src, size_t n, size_t size)
++{
++ size_t nbytes;
++
++ if (check_mul_overflow(n, size, &nbytes))
++ return ERR_PTR(-EOVERFLOW);
++
++ return vmemdup_user(src, nbytes);
++}
++
+ /*
+ * Include machine specific inline routines
+ */
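The two helpers above fold the multiplication-overflow check and the copy into one call. A hedged sketch of a typical conversion (hypothetical driver code; struct example_item and the handler around it are illustrative only):

	/* Sketch: duplicate a user-supplied array without an open-coded n * size */
	static long example_set_items(const struct example_item __user *uitems, size_t n)
	{
		struct example_item *items;

		items = memdup_array_user(uitems, n, sizeof(*items));
		if (IS_ERR(items))
			return PTR_ERR(items);
		/* ... validate and apply items ... */
		kfree(items);
		return 0;
	}

The kexec_load() hunk later in this patch performs exactly this kind of conversion.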
+diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
+index 4f41d839face4..03722690f2c39 100644
+--- a/include/linux/sunrpc/clnt.h
++++ b/include/linux/sunrpc/clnt.h
+@@ -92,6 +92,7 @@ struct rpc_clnt {
+ };
+ const struct cred *cl_cred;
+ unsigned int cl_max_connect; /* max number of transports not to the same IP */
++ struct super_block *pipefs_sb;
+ };
+
+ /*
+diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
+index 59d451f455bfb..094e5eaef072d 100644
+--- a/include/linux/sysctl.h
++++ b/include/linux/sysctl.h
+@@ -227,6 +227,7 @@ extern void __register_sysctl_init(const char *path, struct ctl_table *table,
+ extern struct ctl_table_header *register_sysctl_mount_point(const char *path);
+
+ void do_sysctl_args(void);
++bool sysctl_is_alias(char *param);
+ int do_proc_douintvec(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos,
+ int (*conv)(unsigned long *lvalp,
+@@ -270,6 +271,11 @@ static inline void setup_sysctl_set(struct ctl_table_set *p,
+ static inline void do_sysctl_args(void)
+ {
+ }
++
++static inline bool sysctl_is_alias(char *param)
++{
++ return false;
++}
+ #endif /* CONFIG_SYSCTL */
+
+ int sysctl_max_threads(struct ctl_table *table, int write, void *buffer,
+diff --git a/include/linux/torture.h b/include/linux/torture.h
+index 7038104463e48..017f0f710815a 100644
+--- a/include/linux/torture.h
++++ b/include/linux/torture.h
+@@ -81,7 +81,8 @@ static inline void torture_random_init(struct torture_random_state *trsp)
+ }
+
+ /* Definitions for high-resolution-timer sleeps. */
+-int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, struct torture_random_state *trsp);
++int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, const enum hrtimer_mode mode,
++ struct torture_random_state *trsp);
+ int torture_hrtimeout_us(u32 baset_us, u32 fuzzt_ns, struct torture_random_state *trsp);
+ int torture_hrtimeout_ms(u32 baset_ms, u32 fuzzt_us, struct torture_random_state *trsp);
+ int torture_hrtimeout_jiffies(u32 baset_j, struct torture_random_state *trsp);
+@@ -108,12 +109,15 @@ bool torture_must_stop(void);
+ bool torture_must_stop_irq(void);
+ void torture_kthread_stopping(char *title);
+ int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
+- char *f, struct task_struct **tp);
++ char *f, struct task_struct **tp, void (*cbf)(struct task_struct *tp));
+ void _torture_stop_kthread(char *m, struct task_struct **tp);
+
+ #define torture_create_kthread(n, arg, tp) \
+ _torture_create_kthread(n, (arg), #n, "Creating " #n " task", \
+- "Failed to create " #n, &(tp))
++ "Failed to create " #n, &(tp), NULL)
++#define torture_create_kthread_cb(n, arg, tp, cbf) \
++ _torture_create_kthread(n, (arg), #n, "Creating " #n " task", \
++ "Failed to create " #n, &(tp), cbf)
+ #define torture_stop_kthread(n, tp) \
+ _torture_stop_kthread("Stopping " #n " task", &(tp))
+
+diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
+index faa579209a724..40436b7ddfd24 100644
+--- a/include/linux/trace_events.h
++++ b/include/linux/trace_events.h
+@@ -492,6 +492,7 @@ enum {
+ EVENT_FILE_FL_TRIGGER_COND_BIT,
+ EVENT_FILE_FL_PID_FILTER_BIT,
+ EVENT_FILE_FL_WAS_ENABLED_BIT,
++ EVENT_FILE_FL_FREED_BIT,
+ };
+
+ extern struct trace_event_file *trace_get_event_file(const char *instance,
+@@ -630,6 +631,7 @@ extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...);
+ * TRIGGER_COND - When set, one or more triggers has an associated filter
+ * PID_FILTER - When set, the event is filtered based on pid
+ * WAS_ENABLED - Set when enabled to know to clear trace on module removal
++ * FREED - File descriptor is freed; all fields should be considered invalid
+ */
+ enum {
+ EVENT_FILE_FL_ENABLED = (1 << EVENT_FILE_FL_ENABLED_BIT),
+@@ -643,6 +645,7 @@ enum {
+ EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
+ EVENT_FILE_FL_PID_FILTER = (1 << EVENT_FILE_FL_PID_FILTER_BIT),
+ EVENT_FILE_FL_WAS_ENABLED = (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
++ EVENT_FILE_FL_FREED = (1 << EVENT_FILE_FL_FREED_BIT),
+ };
+
+ struct trace_event_file {
+@@ -671,6 +674,7 @@ struct trace_event_file {
+ * caching and such. Which is mostly OK ;-)
+ */
+ unsigned long flags;
++ atomic_t ref; /* ref count for opened files */
+ atomic_t sm_ref; /* soft-mode reference counter */
+ atomic_t tm_ref; /* trigger-mode reference counter */
+ };
+diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
+index 683efe29fa698..ca26c1f94f044 100644
+--- a/include/linux/workqueue.h
++++ b/include/linux/workqueue.h
+@@ -222,18 +222,16 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
+ * to generate better code.
+ */
+ #ifdef CONFIG_LOCKDEP
+-#define __INIT_WORK(_work, _func, _onstack) \
++#define __INIT_WORK_KEY(_work, _func, _onstack, _key) \
+ do { \
+- static struct lock_class_key __key; \
+- \
+ __init_work((_work), _onstack); \
+ (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
+- lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
++ lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, (_key), 0); \
+ INIT_LIST_HEAD(&(_work)->entry); \
+ (_work)->func = (_func); \
+ } while (0)
+ #else
+-#define __INIT_WORK(_work, _func, _onstack) \
++#define __INIT_WORK_KEY(_work, _func, _onstack, _key) \
+ do { \
+ __init_work((_work), _onstack); \
+ (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
+@@ -242,12 +240,22 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
+ } while (0)
+ #endif
+
++#define __INIT_WORK(_work, _func, _onstack) \
++ do { \
++ static __maybe_unused struct lock_class_key __key; \
++ \
++ __INIT_WORK_KEY(_work, _func, _onstack, &__key); \
++ } while (0)
++
+ #define INIT_WORK(_work, _func) \
+ __INIT_WORK((_work), (_func), 0)
+
+ #define INIT_WORK_ONSTACK(_work, _func) \
+ __INIT_WORK((_work), (_func), 1)
+
++#define INIT_WORK_ONSTACK_KEY(_work, _func, _key) \
++ __INIT_WORK_KEY((_work), (_func), 1, _key)
++
+ #define __INIT_DELAYED_WORK(_work, _func, _tflags) \
+ do { \
+ INIT_WORK(&(_work)->work, (_func)); \
+@@ -683,8 +691,32 @@ static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
+ return fn(arg);
+ }
+ #else
+-long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
+-long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
++long work_on_cpu_key(int cpu, long (*fn)(void *),
++ void *arg, struct lock_class_key *key);
++/*
++ * A new key is defined for each caller to make sure the work
++ * associated with the function doesn't share its locking class.
++ */
++#define work_on_cpu(_cpu, _fn, _arg) \
++({ \
++ static struct lock_class_key __key; \
++ \
++ work_on_cpu_key(_cpu, _fn, _arg, &__key); \
++})
++
++long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
++ void *arg, struct lock_class_key *key);
++
++/*
++ * A new key is defined for each caller to make sure the work
++ * associated with the function doesn't share its locking class.
++ */
++#define work_on_cpu_safe(_cpu, _fn, _arg) \
++({ \
++ static struct lock_class_key __key; \
++ \
++ work_on_cpu_safe_key(_cpu, _fn, _arg, &__key); \
++})
+ #endif /* CONFIG_SMP */
+
+ #ifdef CONFIG_FREEZER
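The per-caller lock_class_key introduced above keeps lockdep from conflating unrelated work_on_cpu() users. A minimal sketch of the new on-stack variant (hypothetical caller; in-tree users would normally get the key from the work_on_cpu() macro expansion):

	static void example_fn(struct work_struct *work)
	{
		/* runs once on the chosen CPU */
	}

	static void example_run_on(int cpu, struct lock_class_key *key)
	{
		struct work_struct w;

		INIT_WORK_ONSTACK_KEY(&w, example_fn, key);
		queue_work_on(cpu, system_wq, &w);
		flush_work(&w);
		destroy_work_on_stack(&w);
	}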
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 7c816359d5a98..75972e211ba12 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -178,9 +178,9 @@ static inline __be32 nft_reg_load_be32(const u32 *sreg)
+ return *(__force __be32 *)sreg;
+ }
+
+-static inline void nft_reg_store64(u32 *dreg, u64 val)
++static inline void nft_reg_store64(u64 *dreg, u64 val)
+ {
+- put_unaligned(val, (u64 *)dreg);
++ put_unaligned(val, dreg);
+ }
+
+ static inline u64 nft_reg_load64(const u32 *sreg)
+diff --git a/include/net/sock.h b/include/net/sock.h
+index fc189910e63fc..b9f0ef4bb527a 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -2006,21 +2006,33 @@ static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
+ /* sk_tx_queue_mapping accepts only up to a 16-bit value */
+ if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX))
+ return;
+- sk->sk_tx_queue_mapping = tx_queue;
++ /* Paired with READ_ONCE() in sk_tx_queue_get() and
++ * other WRITE_ONCE() because socket lock might be not held.
++ */
++ WRITE_ONCE(sk->sk_tx_queue_mapping, tx_queue);
+ }
+
+ #define NO_QUEUE_MAPPING USHRT_MAX
+
+ static inline void sk_tx_queue_clear(struct sock *sk)
+ {
+- sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING;
++ /* Paired with READ_ONCE() in sk_tx_queue_get() and
++ * other WRITE_ONCE() because socket lock might be not held.
++ */
++ WRITE_ONCE(sk->sk_tx_queue_mapping, NO_QUEUE_MAPPING);
+ }
+
+ static inline int sk_tx_queue_get(const struct sock *sk)
+ {
+- if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING)
+- return sk->sk_tx_queue_mapping;
++ if (sk) {
++ /* Paired with WRITE_ONCE() in sk_tx_queue_clear()
++ * and sk_tx_queue_set().
++ */
++ int val = READ_ONCE(sk->sk_tx_queue_mapping);
+
++ if (val != NO_QUEUE_MAPPING)
++ return val;
++ }
+ return -1;
+ }
+
+@@ -2169,7 +2181,7 @@ static inline void __dst_negative_advice(struct sock *sk)
+ if (ndst != dst) {
+ rcu_assign_pointer(sk->sk_dst_cache, ndst);
+ sk_tx_queue_clear(sk);
+- sk->sk_dst_pending_confirm = 0;
++ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
+ }
+ }
+ }
+@@ -2186,7 +2198,7 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
+ struct dst_entry *old_dst;
+
+ sk_tx_queue_clear(sk);
+- sk->sk_dst_pending_confirm = 0;
++ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
+ old_dst = rcu_dereference_protected(sk->sk_dst_cache,
+ lockdep_sock_is_held(sk));
+ rcu_assign_pointer(sk->sk_dst_cache, dst);
+@@ -2199,7 +2211,7 @@ sk_dst_set(struct sock *sk, struct dst_entry *dst)
+ struct dst_entry *old_dst;
+
+ sk_tx_queue_clear(sk);
+- sk->sk_dst_pending_confirm = 0;
++ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
+ old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
+ dst_release(old_dst);
+ }
+diff --git a/include/net/tc_act/tc_ct.h b/include/net/tc_act/tc_ct.h
+index b24ea2d9400ba..1dc2f827d0bcf 100644
+--- a/include/net/tc_act/tc_ct.h
++++ b/include/net/tc_act/tc_ct.h
+@@ -57,6 +57,11 @@ static inline struct nf_flowtable *tcf_ct_ft(const struct tc_action *a)
+ return to_ct_params(a)->nf_ft;
+ }
+
++static inline struct nf_conntrack_helper *tcf_ct_helper(const struct tc_action *a)
++{
++ return to_ct_params(a)->helper;
++}
++
+ #else
+ static inline uint16_t tcf_ct_zone(const struct tc_action *a) { return 0; }
+ static inline int tcf_ct_action(const struct tc_action *a) { return 0; }
+@@ -64,6 +69,10 @@ static inline struct nf_flowtable *tcf_ct_ft(const struct tc_action *a)
+ {
+ return NULL;
+ }
++static inline struct nf_conntrack_helper *tcf_ct_helper(const struct tc_action *a)
++{
++ return NULL;
++}
+ #endif /* CONFIG_NF_CONNTRACK */
+
+ #if IS_ENABLED(CONFIG_NET_ACT_CT)
+diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
+index 528279056b3ab..1a5f90b0a5463 100644
+--- a/include/sound/soc-acpi.h
++++ b/include/sound/soc-acpi.h
+@@ -67,6 +67,10 @@ static inline struct snd_soc_acpi_mach *snd_soc_acpi_codec_list(void *arg)
+ * @i2s_link_mask: I2S/TDM links enabled on the board
+ * @num_dai_drivers: number of elements in @dai_drivers
+ * @dai_drivers: pointer to dai_drivers, used e.g. in nocodec mode
++ * @subsystem_vendor: optional PCI SSID vendor value
++ * @subsystem_device: optional PCI SSID device value
++ * @subsystem_id_set: true if a value has been written to
++ * subsystem_vendor and subsystem_device.
+ */
+ struct snd_soc_acpi_mach_params {
+ u32 acpi_ipc_irq_index;
+@@ -79,6 +83,9 @@ struct snd_soc_acpi_mach_params {
+ u32 i2s_link_mask;
+ u32 num_dai_drivers;
+ struct snd_soc_dai_driver *dai_drivers;
++ unsigned short subsystem_vendor;
++ unsigned short subsystem_device;
++ bool subsystem_id_set;
+ };
+
+ /**
+diff --git a/include/sound/soc-card.h b/include/sound/soc-card.h
+index fc94dfb0021fd..e8ff2e089cd00 100644
+--- a/include/sound/soc-card.h
++++ b/include/sound/soc-card.h
+@@ -59,6 +59,43 @@ int snd_soc_card_add_dai_link(struct snd_soc_card *card,
+ void snd_soc_card_remove_dai_link(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_link);
+
++#ifdef CONFIG_PCI
++static inline void snd_soc_card_set_pci_ssid(struct snd_soc_card *card,
++ unsigned short vendor,
++ unsigned short device)
++{
++ card->pci_subsystem_vendor = vendor;
++ card->pci_subsystem_device = device;
++ card->pci_subsystem_set = true;
++}
++
++static inline int snd_soc_card_get_pci_ssid(struct snd_soc_card *card,
++ unsigned short *vendor,
++ unsigned short *device)
++{
++ if (!card->pci_subsystem_set)
++ return -ENOENT;
++
++ *vendor = card->pci_subsystem_vendor;
++ *device = card->pci_subsystem_device;
++
++ return 0;
++}
++#else /* !CONFIG_PCI */
++static inline void snd_soc_card_set_pci_ssid(struct snd_soc_card *card,
++ unsigned short vendor,
++ unsigned short device)
++{
++}
++
++static inline int snd_soc_card_get_pci_ssid(struct snd_soc_card *card,
++ unsigned short *vendor,
++ unsigned short *device)
++{
++ return -ENOENT;
++}
++#endif /* CONFIG_PCI */
++
+ /* device driver data */
+ static inline void snd_soc_card_set_drvdata(struct snd_soc_card *card,
+ void *data)
+diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
+index e3906ecda740a..5827b4d882fcc 100644
+--- a/include/sound/soc-dai.h
++++ b/include/sound/soc-dai.h
+@@ -355,6 +355,7 @@ struct snd_soc_dai_ops {
+
+ /* bit field */
+ unsigned int no_capture_mute:1;
++ unsigned int mute_unmute_on_trigger:1;
+ };
+
+ struct snd_soc_cdai_ops {
+diff --git a/include/sound/soc.h b/include/sound/soc.h
+index cf34810882347..0c54b343d3e5d 100644
+--- a/include/sound/soc.h
++++ b/include/sound/soc.h
+@@ -931,6 +931,17 @@ struct snd_soc_card {
+ #ifdef CONFIG_DMI
+ char dmi_longname[80];
+ #endif /* CONFIG_DMI */
++
++#ifdef CONFIG_PCI
++ /*
++ * PCI does not define 0 as invalid, so pci_subsystem_set indicates
++ * whether a value has been written to these fields.
++ */
++ unsigned short pci_subsystem_vendor;
++ unsigned short pci_subsystem_device;
++ bool pci_subsystem_set;
++#endif /* CONFIG_PCI */
++
+ char topology_shortname[32];
+
+ struct device *dev;
+diff --git a/include/sound/sof.h b/include/sound/sof.h
+index d3c41f87ac319..51294f2ba302c 100644
+--- a/include/sound/sof.h
++++ b/include/sound/sof.h
+@@ -64,6 +64,14 @@ struct snd_sof_pdata {
+ const char *name;
+ const char *platform;
+
++ /*
++ * PCI SSID. As PCI does not define 0 as invalid, the subsystem_id_set
++ * flag indicates that a value has been written to these members.
++ */
++ unsigned short subsystem_vendor;
++ unsigned short subsystem_device;
++ bool subsystem_id_set;
++
+ struct device *dev;
+
+ /*
+diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
+index 3c36aeade991e..9a85c69782bdd 100644
+--- a/include/uapi/linux/prctl.h
++++ b/include/uapi/linux/prctl.h
+@@ -283,7 +283,7 @@ struct prctl_mm_map {
+
+ /* Memory deny write / execute */
+ #define PR_SET_MDWE 65
+-# define PR_MDWE_REFUSE_EXEC_GAIN 1
++# define PR_MDWE_REFUSE_EXEC_GAIN (1UL << 0)
+
+ #define PR_GET_MDWE 66
+
+diff --git a/include/uapi/linux/vm_sockets.h b/include/uapi/linux/vm_sockets.h
+index c60ca33eac594..ed07181d4eff9 100644
+--- a/include/uapi/linux/vm_sockets.h
++++ b/include/uapi/linux/vm_sockets.h
+@@ -191,4 +191,21 @@ struct sockaddr_vm {
+
+ #define IOCTL_VM_SOCKETS_GET_LOCAL_CID _IO(7, 0xb9)
+
++/* MSG_ZEROCOPY notifications are encoded in the standard error format,
++ * sock_extended_err. See Documentation/networking/msg_zerocopy.rst in
++ * the kernel source tree for more details.
++ */
++
++/* 'cmsg_level' field value of 'struct cmsghdr' for notification parsing
++ * when MSG_ZEROCOPY flag is used on transmissions.
++ */
++
++#define SOL_VSOCK 287
++
++/* 'cmsg_type' field value of 'struct cmsghdr' for notification parsing
++ * when MSG_ZEROCOPY flag is used on transmissions.
++ */
++
++#define VSOCK_RECVERR 1
++
+ #endif /* _UAPI_VM_SOCKETS_H */
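As with other MSG_ZEROCOPY users, completion notifications arrive on the socket error queue. A hedged userspace sketch of parsing them with the two constants above (handle_completion() is illustrative only):

	/* Sketch: drain one batch of zerocopy completions from a vsock socket */
	char control[CMSG_SPACE(sizeof(struct sock_extended_err))];
	struct msghdr msg = {
		.msg_control = control,
		.msg_controllen = sizeof(control),
	};
	struct cmsghdr *cm;

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return;
	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == SOL_VSOCK && cm->cmsg_type == VSOCK_RECVERR)
			handle_completion((struct sock_extended_err *)CMSG_DATA(cm));
	}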
+diff --git a/include/video/sticore.h b/include/video/sticore.h
+index 945ad60463a18..012b5b46ad7d0 100644
+--- a/include/video/sticore.h
++++ b/include/video/sticore.h
+@@ -232,7 +232,7 @@ struct sti_rom_font {
+ u8 height;
+ u8 font_type; /* language type */
+ u8 bytes_per_char;
+- u32 next_font;
++ s32 next_font; /* note: signed int */
+ u8 underline_height;
+ u8 underline_pos;
+ u8 res008[2];
+diff --git a/init/Makefile b/init/Makefile
+index ec557ada3c12e..cbac576c57d63 100644
+--- a/init/Makefile
++++ b/init/Makefile
+@@ -60,4 +60,5 @@ include/generated/utsversion.h: FORCE
+ $(obj)/version-timestamp.o: include/generated/utsversion.h
+ CFLAGS_version-timestamp.o := -include include/generated/utsversion.h
+ KASAN_SANITIZE_version-timestamp.o := n
++KCSAN_SANITIZE_version-timestamp.o := n
+ GCOV_PROFILE_version-timestamp.o := n
+diff --git a/init/main.c b/init/main.c
+index ad920fac325c3..1e19a40f40c5f 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -530,6 +530,10 @@ static int __init unknown_bootoption(char *param, char *val,
+ {
+ size_t len = strlen(param);
+
++ /* Handle params aliased to sysctls */
++ if (sysctl_is_alias(param))
++ return 0;
++
+ repair_env_string(param, val);
+
+ /* Handle obsolete-style parameters */
+diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c
+index b603a06f7103d..5fcfe03ed93ec 100644
+--- a/io_uring/fdinfo.c
++++ b/io_uring/fdinfo.c
+@@ -139,13 +139,8 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
+ if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
+ struct io_sq_data *sq = ctx->sq_data;
+
+- if (mutex_trylock(&sq->lock)) {
+- if (sq->thread) {
+- sq_pid = task_pid_nr(sq->thread);
+- sq_cpu = task_cpu(sq->thread);
+- }
+- mutex_unlock(&sq->lock);
+- }
++ sq_pid = sq->task_pid;
++ sq_cpu = sq->sq_cpu;
+ }
+
+ seq_printf(m, "SqThread:\t%d\n", sq_pid);
+diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
+index bd6c2c7959a5b..65b5dbe3c850e 100644
+--- a/io_uring/sqpoll.c
++++ b/io_uring/sqpoll.c
+@@ -214,6 +214,7 @@ static bool io_sqd_handle_event(struct io_sq_data *sqd)
+ did_sig = get_signal(&ksig);
+ cond_resched();
+ mutex_lock(&sqd->lock);
++ sqd->sq_cpu = raw_smp_processor_id();
+ }
+ return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
+ }
+@@ -229,10 +230,15 @@ static int io_sq_thread(void *data)
+ snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
+ set_task_comm(current, buf);
+
+- if (sqd->sq_cpu != -1)
++ /* reset to our pid after we've set task_comm, for fdinfo */
++ sqd->task_pid = current->pid;
++
++ if (sqd->sq_cpu != -1) {
+ set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
+- else
++ } else {
+ set_cpus_allowed_ptr(current, cpu_online_mask);
++ sqd->sq_cpu = raw_smp_processor_id();
++ }
+
+ mutex_lock(&sqd->lock);
+ while (1) {
+@@ -261,6 +267,7 @@ static int io_sq_thread(void *data)
+ mutex_unlock(&sqd->lock);
+ cond_resched();
+ mutex_lock(&sqd->lock);
++ sqd->sq_cpu = raw_smp_processor_id();
+ }
+ continue;
+ }
+@@ -294,6 +301,7 @@ static int io_sq_thread(void *data)
+ mutex_unlock(&sqd->lock);
+ schedule();
+ mutex_lock(&sqd->lock);
++ sqd->sq_cpu = raw_smp_processor_id();
+ }
+ list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
+ atomic_andnot(IORING_SQ_NEED_WAKEUP,
+diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
+index 65075f1e4ac8c..7a98cd176a127 100644
+--- a/kernel/audit_watch.c
++++ b/kernel/audit_watch.c
+@@ -527,11 +527,18 @@ int audit_exe_compare(struct task_struct *tsk, struct audit_fsnotify_mark *mark)
+ unsigned long ino;
+ dev_t dev;
+
+- exe_file = get_task_exe_file(tsk);
++ /* only do exe filtering if we are recording @current events/records */
++ if (tsk != current)
++ return 0;
++
++ if (!current->mm)
++ return 0;
++ exe_file = get_mm_exe_file(current->mm);
+ if (!exe_file)
+ return 0;
+ ino = file_inode(exe_file)->i_ino;
+ dev = file_inode(exe_file)->i_sb->s_dev;
+ fput(exe_file);
++
+ return audit_mark_compare(mark, ino, dev);
+ }
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index e3e45b651cd40..33d1a76b7fc5d 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -613,7 +613,11 @@ static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
+
+ if (val < ksym->start)
+ return -1;
+- if (val >= ksym->end)
++ /* Ensure that we detect return addresses as part of the program, when
++ * the final instruction is a call and the program is part of the stack
++ * trace. Therefore, do val > ksym->end instead of val >= ksym->end.
++ */
++ if (val > ksym->end)
+ return 1;
+
+ return 0;
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index e7e2687c35884..9f27b40839831 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1513,7 +1513,8 @@ static void print_verifier_state(struct bpf_verifier_env *env,
+ if (state->in_async_callback_fn)
+ verbose(env, " async_cb");
+ verbose(env, "\n");
+- mark_verifier_state_clean(env);
++ if (!print_all)
++ mark_verifier_state_clean(env);
+ }
+
+ static inline u32 vlog_alignment(u32 pos)
+@@ -3192,12 +3193,29 @@ static int push_jmp_history(struct bpf_verifier_env *env,
+
+ /* Backtrack one insn at a time. If idx is not at the top of recorded
+ * history then previous instruction came from straight line execution.
++ * Return -ENOENT if we exhausted all instructions within given state.
++ *
++ * It's legal to have a bit of looping with the same starting and ending
++ * insn index within the same state, e.g.: 3->4->5->3, so just because current
++ * instruction index is the same as state's first_idx doesn't mean we are
++ * done. If there is still some jump history left, we should keep going. We
++ * need to take into account that we might have a jump history between given
++ * state's parent and itself, due to checkpointing. In this case, we'll have
++ * a history entry recording a jump from the last instruction of the
++ * parent state to the first instruction of the given state.
+ */
+ static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
+ u32 *history)
+ {
+ u32 cnt = *history;
+
++ if (i == st->first_insn_idx) {
++ if (cnt == 0)
++ return -ENOENT;
++ if (cnt == 1 && st->jmp_history[0].idx == i)
++ return -ENOENT;
++ }
++
+ if (cnt && st->jmp_history[cnt - 1].idx == i) {
+ i = st->jmp_history[cnt - 1].prev_idx;
+ (*history)--;
+@@ -3418,7 +3436,12 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
+ if (class == BPF_ALU || class == BPF_ALU64) {
+ if (!bt_is_reg_set(bt, dreg))
+ return 0;
+- if (opcode == BPF_MOV) {
++ if (opcode == BPF_END || opcode == BPF_NEG) {
++ /* sreg is reserved and unused
++ * dreg still need precision before this insn
++ */
++ return 0;
++ } else if (opcode == BPF_MOV) {
+ if (BPF_SRC(insn->code) == BPF_X) {
+ /* dreg = sreg
+ * dreg needs precision after this insn
+@@ -4072,10 +4095,10 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
+ * Nothing to be tracked further in the parent state.
+ */
+ return 0;
+- if (i == first_idx)
+- break;
+ subseq_idx = i;
+ i = get_prev_insn_idx(st, i, &history);
++ if (i == -ENOENT)
++ break;
+ if (i >= env->prog->len) {
+ /* This can happen if backtracking reached insn 0
+ * and there are still reg_mask or stack_mask
+@@ -4350,7 +4373,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
+ insn->imm != 0 && env->bpf_capable) {
+ struct bpf_reg_state fake_reg = {};
+
+- __mark_reg_known(&fake_reg, (u32)insn->imm);
++ __mark_reg_known(&fake_reg, insn->imm);
+ fake_reg.type = SCALAR_VALUE;
+ save_register_state(state, spi, &fake_reg, size);
+ } else if (reg && is_spillable_regtype(reg->type)) {
+@@ -13915,6 +13938,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
+ !sanitize_speculative_path(env, insn, *insn_idx + 1,
+ *insn_idx))
+ return -EFAULT;
++ if (env->log.level & BPF_LOG_LEVEL)
++ print_insn_state(env, this_branch->frame[this_branch->curframe]);
+ *insn_idx += insn->off;
+ return 0;
+ } else if (pred == 0) {
+@@ -13927,6 +13952,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
+ *insn_idx + insn->off + 1,
+ *insn_idx))
+ return -EFAULT;
++ if (env->log.level & BPF_LOG_LEVEL)
++ print_insn_state(env, this_branch->frame[this_branch->curframe]);
+ return 0;
+ }
+
+@@ -14558,15 +14585,16 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
+ struct bpf_verifier_env *env,
+ bool visit_callee)
+ {
+- int ret;
++ int ret, insn_sz;
+
+- ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
++ insn_sz = bpf_is_ldimm64(&insns[t]) ? 2 : 1;
++ ret = push_insn(t, t + insn_sz, FALLTHROUGH, env, false);
+ if (ret)
+ return ret;
+
+- mark_prune_point(env, t + 1);
++ mark_prune_point(env, t + insn_sz);
+ /* when we exit from subprog, we need to record non-linear history */
+- mark_jmp_point(env, t + 1);
++ mark_jmp_point(env, t + insn_sz);
+
+ if (visit_callee) {
+ mark_prune_point(env, t);
+@@ -14588,15 +14616,17 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
+ static int visit_insn(int t, struct bpf_verifier_env *env)
+ {
+ struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t];
+- int ret;
++ int ret, insn_sz;
+
+ if (bpf_pseudo_func(insn))
+ return visit_func_call_insn(t, insns, env, true);
+
+ /* All non-branch instructions have a single fall-through edge. */
+ if (BPF_CLASS(insn->code) != BPF_JMP &&
+- BPF_CLASS(insn->code) != BPF_JMP32)
+- return push_insn(t, t + 1, FALLTHROUGH, env, false);
++ BPF_CLASS(insn->code) != BPF_JMP32) {
++ insn_sz = bpf_is_ldimm64(insn) ? 2 : 1;
++ return push_insn(t, t + insn_sz, FALLTHROUGH, env, false);
++ }
+
+ switch (BPF_OP(insn->code)) {
+ case BPF_EXIT:
+@@ -14710,11 +14740,21 @@ static int check_cfg(struct bpf_verifier_env *env)
+ }
+
+ for (i = 0; i < insn_cnt; i++) {
++ struct bpf_insn *insn = &env->prog->insnsi[i];
++
+ if (insn_state[i] != EXPLORED) {
+ verbose(env, "unreachable insn %d\n", i);
+ ret = -EINVAL;
+ goto err_free;
+ }
++ if (bpf_is_ldimm64(insn)) {
++ if (insn_state[i + 1] != 0) {
++ verbose(env, "jump into the middle of ldimm64 insn %d\n", i);
++ ret = -EINVAL;
++ goto err_free;
++ }
++ i++; /* skip second half of ldimm64 */
++ }
+ }
+ ret = 0; /* cfg looks good */
+
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index f55a40db065f7..096d30ff5c4ec 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -3836,14 +3836,6 @@ static __poll_t cgroup_pressure_poll(struct kernfs_open_file *of,
+ return psi_trigger_poll(&ctx->psi.trigger, of->file, pt);
+ }
+
+-static int cgroup_pressure_open(struct kernfs_open_file *of)
+-{
+- if (of->file->f_mode & FMODE_WRITE && !capable(CAP_SYS_RESOURCE))
+- return -EPERM;
+-
+- return 0;
+-}
+-
+ static void cgroup_pressure_release(struct kernfs_open_file *of)
+ {
+ struct cgroup_file_ctx *ctx = of->priv;
+@@ -5243,7 +5235,6 @@ static struct cftype cgroup_psi_files[] = {
+ {
+ .name = "io.pressure",
+ .file_offset = offsetof(struct cgroup, psi_files[PSI_IO]),
+- .open = cgroup_pressure_open,
+ .seq_show = cgroup_io_pressure_show,
+ .write = cgroup_io_pressure_write,
+ .poll = cgroup_pressure_poll,
+@@ -5252,7 +5243,6 @@ static struct cftype cgroup_psi_files[] = {
+ {
+ .name = "memory.pressure",
+ .file_offset = offsetof(struct cgroup, psi_files[PSI_MEM]),
+- .open = cgroup_pressure_open,
+ .seq_show = cgroup_memory_pressure_show,
+ .write = cgroup_memory_pressure_write,
+ .poll = cgroup_pressure_poll,
+@@ -5261,7 +5251,6 @@ static struct cftype cgroup_psi_files[] = {
+ {
+ .name = "cpu.pressure",
+ .file_offset = offsetof(struct cgroup, psi_files[PSI_CPU]),
+- .open = cgroup_pressure_open,
+ .seq_show = cgroup_cpu_pressure_show,
+ .write = cgroup_cpu_pressure_write,
+ .poll = cgroup_pressure_poll,
+@@ -5271,7 +5260,6 @@ static struct cftype cgroup_psi_files[] = {
+ {
+ .name = "irq.pressure",
+ .file_offset = offsetof(struct cgroup, psi_files[PSI_IRQ]),
+- .open = cgroup_pressure_open,
+ .seq_show = cgroup_irq_pressure_show,
+ .write = cgroup_irq_pressure_write,
+ .poll = cgroup_pressure_poll,
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 26119d2154102..189ba5fd9af4b 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -1503,11 +1503,14 @@ static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
+ /*
+ * Ensure that the control task does not run on the to be offlined
+ * CPU to prevent a deadlock against cfs_b->period_timer.
++ * Also keep at least one housekeeping cpu onlined to avoid generating
++ * an empty sched_domain span.
+ */
+- cpu = cpumask_any_but(cpu_online_mask, cpu);
+- if (cpu >= nr_cpu_ids)
+- return -EBUSY;
+- return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
++ for_each_cpu_and(cpu, cpu_online_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) {
++ if (cpu != work.cpu)
++ return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
++ }
++ return -EBUSY;
+ }
+
+ static int cpu_down(unsigned int cpu, enum cpuhp_state target)
+diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
+index d5e9ccde3ab8e..3a904d8697c8f 100644
+--- a/kernel/debug/debug_core.c
++++ b/kernel/debug/debug_core.c
+@@ -1006,6 +1006,9 @@ void kgdb_panic(const char *msg)
+ if (panic_timeout)
+ return;
+
++ debug_locks_off();
++ console_flush_on_panic(CONSOLE_FLUSH_PENDING);
++
+ if (dbg_kdb_mode)
+ kdb_printf("PANIC: %s\n", msg);
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index e66398c9ffe05..271cb953fb52d 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -4816,6 +4816,11 @@ find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
+ void *task_ctx_data = NULL;
+
+ if (!ctx->task) {
++ /*
++ * perf_pmu_migrate_context() / __perf_pmu_install_event()
++ * relies on the fact that find_get_pmu_context() cannot fail
++ * for CPU contexts.
++ */
+ struct perf_cpu_pmu_context *cpc;
+
+ cpc = per_cpu_ptr(pmu->cpu_pmu_context, event->cpu);
+@@ -12888,6 +12893,9 @@ static void __perf_pmu_install_event(struct pmu *pmu,
+ int cpu, struct perf_event *event)
+ {
+ struct perf_event_pmu_context *epc;
++ struct perf_event_context *old_ctx = event->ctx;
++
++ get_ctx(ctx); /* normally find_get_context() */
+
+ event->cpu = cpu;
+ epc = find_get_pmu_context(pmu, ctx, event);
+@@ -12896,6 +12904,11 @@ static void __perf_pmu_install_event(struct pmu *pmu,
+ if (event->state >= PERF_EVENT_STATE_OFF)
+ event->state = PERF_EVENT_STATE_INACTIVE;
+ perf_install_in_context(ctx, event, cpu);
++
++ /*
++ * Now that event->ctx is updated and visible, put the old ctx.
++ */
++ put_ctx(old_ctx);
+ }
+
+ static void __perf_pmu_install(struct perf_event_context *ctx,
+@@ -12934,6 +12947,10 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
+ struct perf_event_context *src_ctx, *dst_ctx;
+ LIST_HEAD(events);
+
++ /*
++ * Since per-cpu context is persistent, no need to grab an extra
++ * reference.
++ */
+ src_ctx = &per_cpu_ptr(&perf_cpu_context, src_cpu)->ctx;
+ dst_ctx = &per_cpu_ptr(&perf_cpu_context, dst_cpu)->ctx;
+
+diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
+index a0433f37b0243..4a260ceed9c73 100644
+--- a/kernel/events/ring_buffer.c
++++ b/kernel/events/ring_buffer.c
+@@ -699,6 +699,12 @@ int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
+ watermark = 0;
+ }
+
++ /*
++ * kcalloc_node() is unable to allocate a buffer if the size is larger
++ * than PAGE_SIZE << MAX_ORDER; bail out directly in this case.
++ */
++ if (get_order((unsigned long)nr_pages * sizeof(void *)) > MAX_ORDER)
++ return -ENOMEM;
+ rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL,
+ node);
+ if (!rb->aux_pages)
+diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c
+index 5971a66be0347..aae0402507ed7 100644
+--- a/kernel/irq/debugfs.c
++++ b/kernel/irq/debugfs.c
+@@ -121,7 +121,6 @@ static const struct irq_bit_descr irqdata_states[] = {
+ BIT_MASK_DESCR(IRQD_AFFINITY_ON_ACTIVATE),
+ BIT_MASK_DESCR(IRQD_MANAGED_SHUTDOWN),
+ BIT_MASK_DESCR(IRQD_CAN_RESERVE),
+- BIT_MASK_DESCR(IRQD_MSI_NOMASK_QUIRK),
+
+ BIT_MASK_DESCR(IRQD_FORWARDED_TO_VCPU),
+
+diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
+index c653cd31548d0..5a452b94b6434 100644
+--- a/kernel/irq/generic-chip.c
++++ b/kernel/irq/generic-chip.c
+@@ -544,21 +544,34 @@ EXPORT_SYMBOL_GPL(irq_setup_alt_chip);
+ void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
+ unsigned int clr, unsigned int set)
+ {
+- unsigned int i = gc->irq_base;
++ unsigned int i, virq;
+
+ raw_spin_lock(&gc_lock);
+ list_del(&gc->list);
+ raw_spin_unlock(&gc_lock);
+
+- for (; msk; msk >>= 1, i++) {
++ for (i = 0; msk; msk >>= 1, i++) {
+ if (!(msk & 0x01))
+ continue;
+
++ /*
++ * Interrupt domain based chips store the base hardware
++ * interrupt number in gc::irq_base. Otherwise gc::irq_base
++ * contains the base Linux interrupt number.
++ */
++ if (gc->domain) {
++ virq = irq_find_mapping(gc->domain, gc->irq_base + i);
++ if (!virq)
++ continue;
++ } else {
++ virq = gc->irq_base + i;
++ }
++
+ /* Remove handler first. That will mask the irq line */
+- irq_set_handler(i, NULL);
+- irq_set_chip(i, &no_irq_chip);
+- irq_set_chip_data(i, NULL);
+- irq_modify_status(i, clr, set);
++ irq_set_handler(virq, NULL);
++ irq_set_chip(virq, &no_irq_chip);
++ irq_set_chip_data(virq, NULL);
++ irq_modify_status(virq, clr, set);
+ }
+ }
+ EXPORT_SYMBOL_GPL(irq_remove_generic_chip);
+diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
+index b4c31a5c11473..79b4a58ba9c3f 100644
+--- a/kernel/irq/msi.c
++++ b/kernel/irq/msi.c
+@@ -1204,7 +1204,6 @@ static int msi_handle_pci_fail(struct irq_domain *domain, struct msi_desc *desc,
+
+ #define VIRQ_CAN_RESERVE 0x01
+ #define VIRQ_ACTIVATE 0x02
+-#define VIRQ_NOMASK_QUIRK 0x04
+
+ static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflags)
+ {
+@@ -1213,8 +1212,6 @@ static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflag
+
+ if (!(vflags & VIRQ_CAN_RESERVE)) {
+ irqd_clr_can_reserve(irqd);
+- if (vflags & VIRQ_NOMASK_QUIRK)
+- irqd_set_msi_nomask_quirk(irqd);
+
+ /*
+ * If the interrupt is managed but no CPU is available to
+@@ -1275,15 +1272,8 @@ static int __msi_domain_alloc_irqs(struct device *dev, struct irq_domain *domain
+ * Interrupt can use a reserved vector and will not occupy
+ * a real device vector until the interrupt is requested.
+ */
+- if (msi_check_reservation_mode(domain, info, dev)) {
++ if (msi_check_reservation_mode(domain, info, dev))
+ vflags |= VIRQ_CAN_RESERVE;
+- /*
+- * MSI affinity setting requires a special quirk (X86) when
+- * reservation mode is active.
+- */
+- if (info->flags & MSI_FLAG_NOMASK_QUIRK)
+- vflags |= VIRQ_NOMASK_QUIRK;
+- }
+
+ xa_for_each_range(xa, idx, desc, ctrl->first, ctrl->last) {
+ if (!msi_desc_match(desc, MSI_DESC_NOTASSOCIATED))
+diff --git a/kernel/kexec.c b/kernel/kexec.c
+index 92d301f987766..f6067c1bb0893 100644
+--- a/kernel/kexec.c
++++ b/kernel/kexec.c
+@@ -242,7 +242,7 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
+ ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
+ return -EINVAL;
+
+- ksegments = memdup_user(segments, nr_segments * sizeof(ksegments[0]));
++ ksegments = memdup_array_user(segments, nr_segments, sizeof(ksegments[0]));
+ if (IS_ERR(ksegments))
+ return PTR_ERR(ksegments);
+
+diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
+index 949d3deae5062..270c7f80ce84c 100644
+--- a/kernel/locking/locktorture.c
++++ b/kernel/locking/locktorture.c
+@@ -45,6 +45,7 @@ torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
+ torture_param(int, rt_boost, 2,
+ "Do periodic rt-boost. 0=Disable, 1=Only for rt_mutex, 2=For all lock types.");
+ torture_param(int, rt_boost_factor, 50, "A factor determining how often rt-boost happens.");
++torture_param(int, writer_fifo, 0, "Run writers at sched_set_fifo() priority");
+ torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
+ torture_param(int, nested_locks, 0, "Number of nested locks (max = 8)");
+ /* Going much higher trips "BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!" errors */
+@@ -809,7 +810,8 @@ static int lock_torture_writer(void *arg)
+ bool skip_main_lock;
+
+ VERBOSE_TOROUT_STRING("lock_torture_writer task started");
+- set_user_nice(current, MAX_NICE);
++ if (!rt_task(current))
++ set_user_nice(current, MAX_NICE);
+
+ do {
+ if ((torture_random(&rand) & 0xfffff) == 0)
+@@ -1015,8 +1017,7 @@ static void lock_torture_cleanup(void)
+
+ if (writer_tasks) {
+ for (i = 0; i < cxt.nrealwriters_stress; i++)
+- torture_stop_kthread(lock_torture_writer,
+- writer_tasks[i]);
++ torture_stop_kthread(lock_torture_writer, writer_tasks[i]);
+ kfree(writer_tasks);
+ writer_tasks = NULL;
+ }
+@@ -1244,8 +1245,9 @@ static int __init lock_torture_init(void)
+ goto create_reader;
+
+ /* Create writer. */
+- firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
+- writer_tasks[i]);
++ firsterr = torture_create_kthread_cb(lock_torture_writer, &cxt.lwsa[i],
++ writer_tasks[i],
++ writer_fifo ? sched_set_fifo : NULL);
+ if (torture_init_error(firsterr))
+ goto unwind;
+
+diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
+index 93cca6e698600..7c5a8f05497f2 100644
+--- a/kernel/locking/test-ww_mutex.c
++++ b/kernel/locking/test-ww_mutex.c
+@@ -466,7 +466,6 @@ retry:
+ } while (!time_after(jiffies, stress->timeout));
+
+ kfree(order);
+- kfree(stress);
+ }
+
+ struct reorder_lock {
+@@ -531,7 +530,6 @@ out:
+ list_for_each_entry_safe(ll, ln, &locks, link)
+ kfree(ll);
+ kfree(order);
+- kfree(stress);
+ }
+
+ static void stress_one_work(struct work_struct *work)
+@@ -552,8 +550,6 @@ static void stress_one_work(struct work_struct *work)
+ break;
+ }
+ } while (!time_after(jiffies, stress->timeout));
+-
+- kfree(stress);
+ }
+
+ #define STRESS_INORDER BIT(0)
+@@ -564,15 +560,24 @@ static void stress_one_work(struct work_struct *work)
+ static int stress(int nlocks, int nthreads, unsigned int flags)
+ {
+ struct ww_mutex *locks;
+- int n;
++ struct stress *stress_array;
++ int n, count;
+
+ locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
+ if (!locks)
+ return -ENOMEM;
+
++ stress_array = kmalloc_array(nthreads, sizeof(*stress_array),
++ GFP_KERNEL);
++ if (!stress_array) {
++ kfree(locks);
++ return -ENOMEM;
++ }
++
+ for (n = 0; n < nlocks; n++)
+ ww_mutex_init(&locks[n], &ww_class);
+
++ count = 0;
+ for (n = 0; nthreads; n++) {
+ struct stress *stress;
+ void (*fn)(struct work_struct *work);
+@@ -596,9 +601,7 @@ static int stress(int nlocks, int nthreads, unsigned int flags)
+ if (!fn)
+ continue;
+
+- stress = kmalloc(sizeof(*stress), GFP_KERNEL);
+- if (!stress)
+- break;
++ stress = &stress_array[count++];
+
+ INIT_WORK(&stress->work, fn);
+ stress->locks = locks;
+@@ -613,6 +616,7 @@ static int stress(int nlocks, int nthreads, unsigned int flags)
+
+ for (n = 0; n < nlocks; n++)
+ ww_mutex_destroy(&locks[n]);
++ kfree(stress_array);
+ kfree(locks);
+
+ return 0;
+diff --git a/kernel/padata.c b/kernel/padata.c
+index ff349e1084c1d..179fb1518070c 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -202,7 +202,7 @@ int padata_do_parallel(struct padata_shell *ps,
+ *cb_cpu = cpu;
+ }
+
+- err = -EBUSY;
++ err = -EBUSY;
+ if ((pinst->flags & PADATA_RESET))
+ goto out;
+
+diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
+index 0415d5ecb9772..e9ba7fc87d4ad 100644
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -2474,8 +2474,9 @@ static void *get_highmem_page_buffer(struct page *page,
+ pbe->copy_page = tmp;
+ } else {
+ /* Copy of the page will be stored in normal memory */
+- kaddr = safe_pages_list;
+- safe_pages_list = safe_pages_list->next;
++ kaddr = __get_safe_page(ca->gfp_mask);
++ if (!kaddr)
++ return ERR_PTR(-ENOMEM);
+ pbe->copy_page = virt_to_page(kaddr);
+ }
+ pbe->next = highmem_pblist;
+@@ -2655,8 +2656,9 @@ static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
+ return ERR_PTR(-ENOMEM);
+ }
+ pbe->orig_address = page_address(page);
+- pbe->address = safe_pages_list;
+- safe_pages_list = safe_pages_list->next;
++ pbe->address = __get_safe_page(ca->gfp_mask);
++ if (!pbe->address)
++ return ERR_PTR(-ENOMEM);
+ pbe->next = restore_pblist;
+ restore_pblist = pbe;
+ return pbe->address;
+@@ -2687,8 +2689,6 @@ int snapshot_write_next(struct snapshot_handle *handle)
+ if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
+ return 0;
+
+- handle->sync_read = 1;
+-
+ if (!handle->cur) {
+ if (!buffer)
+ /* This makes the buffer be freed by swsusp_free() */
+@@ -2724,7 +2724,6 @@ int snapshot_write_next(struct snapshot_handle *handle)
+ memory_bm_position_reset(&orig_bm);
+ restore_pblist = NULL;
+ handle->buffer = get_buffer(&orig_bm, &ca);
+- handle->sync_read = 0;
+ if (IS_ERR(handle->buffer))
+ return PTR_ERR(handle->buffer);
+ }
+@@ -2734,9 +2733,8 @@ int snapshot_write_next(struct snapshot_handle *handle)
+ handle->buffer = get_buffer(&orig_bm, &ca);
+ if (IS_ERR(handle->buffer))
+ return PTR_ERR(handle->buffer);
+- if (handle->buffer != buffer)
+- handle->sync_read = 0;
+ }
++ handle->sync_read = (handle->buffer == buffer);
+ handle->cur++;
+ return PAGE_SIZE;
+ }
+diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
+index 253ed509b6abb..25285893e44e7 100644
+--- a/kernel/rcu/srcutree.c
++++ b/kernel/rcu/srcutree.c
+@@ -223,7 +223,7 @@ static bool init_srcu_struct_nodes(struct srcu_struct *ssp, gfp_t gfp_flags)
+ snp->grplo = cpu;
+ snp->grphi = cpu;
+ }
+- sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
++ sdp->grpmask = 1UL << (cpu - sdp->mynode->grplo);
+ }
+ smp_store_release(&ssp->srcu_sup->srcu_size_state, SRCU_SIZE_WAIT_BARRIER);
+ return true;
+@@ -782,8 +782,7 @@ static void srcu_gp_start(struct srcu_struct *ssp)
+ spin_lock_rcu_node(sdp); /* Interrupts already disabled. */
+ rcu_segcblist_advance(&sdp->srcu_cblist,
+ rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
+- (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
+- rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq));
++ WARN_ON_ONCE(!rcu_segcblist_segempty(&sdp->srcu_cblist, RCU_NEXT_TAIL));
+ spin_unlock_rcu_node(sdp); /* Interrupts remain disabled. */
+ WRITE_ONCE(ssp->srcu_sup->srcu_gp_start, jiffies);
+ WRITE_ONCE(ssp->srcu_sup->srcu_n_exp_nodelay, 0);
+@@ -833,7 +832,7 @@ static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp
+ int cpu;
+
+ for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
+- if (!(mask & (1 << (cpu - snp->grplo))))
++ if (!(mask & (1UL << (cpu - snp->grplo))))
+ continue;
+ srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
+ }
+@@ -1719,6 +1718,7 @@ static void srcu_invoke_callbacks(struct work_struct *work)
+ ssp = sdp->ssp;
+ rcu_cblist_init(&ready_cbs);
+ spin_lock_irq_rcu_node(sdp);
++ WARN_ON_ONCE(!rcu_segcblist_segempty(&sdp->srcu_cblist, RCU_NEXT_TAIL));
+ rcu_segcblist_advance(&sdp->srcu_cblist,
+ rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
+ if (sdp->srcu_cblist_invoking ||
+@@ -1747,8 +1747,6 @@ static void srcu_invoke_callbacks(struct work_struct *work)
+ */
+ spin_lock_irq_rcu_node(sdp);
+ rcu_segcblist_add_len(&sdp->srcu_cblist, -len);
+- (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
+- rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq));
+ sdp->srcu_cblist_invoking = false;
+ more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
+ spin_unlock_irq_rcu_node(sdp);
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 1449cb69a0e0e..8f867fbf9b312 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -31,6 +31,7 @@
+ #include <linux/bitops.h>
+ #include <linux/export.h>
+ #include <linux/completion.h>
++#include <linux/kmemleak.h>
+ #include <linux/moduleparam.h>
+ #include <linux/panic.h>
+ #include <linux/panic_notifier.h>
+@@ -1552,10 +1553,22 @@ static bool rcu_gp_fqs_check_wake(int *gfp)
+ */
+ static void rcu_gp_fqs(bool first_time)
+ {
++ int nr_fqs = READ_ONCE(rcu_state.nr_fqs_jiffies_stall);
+ struct rcu_node *rnp = rcu_get_root();
+
+ WRITE_ONCE(rcu_state.gp_activity, jiffies);
+ WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1);
++
++ WARN_ON_ONCE(nr_fqs > 3);
++ /* Only count down nr_fqs for stall purposes if jiffies moves. */
++ if (nr_fqs) {
++ if (nr_fqs == 1) {
++ WRITE_ONCE(rcu_state.jiffies_stall,
++ jiffies + rcu_jiffies_till_stall_check());
++ }
++ WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, --nr_fqs);
++ }
++
+ if (first_time) {
+ /* Collect dyntick-idle snapshots. */
+ force_qs_rnp(dyntick_save_progress_counter);
+@@ -3384,6 +3397,14 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
+ success = true;
+ }
+
++ /*
++ * The kvfree_rcu() caller considers the pointer freed at this point
++ * and likely removes any references to it. Since the actual slab
++ * freeing (and kmemleak_free()) is deferred, tell kmemleak to ignore
++ * this object (no scanning or false positives reporting).
++ */
++ kmemleak_ignore(ptr);
++
+ // Set timer to drain after KFREE_DRAIN_JIFFIES.
+ if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING)
+ schedule_delayed_monitor_work(krcp);
+diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
+index 192536916f9a6..e9821a8422dbe 100644
+--- a/kernel/rcu/tree.h
++++ b/kernel/rcu/tree.h
+@@ -386,6 +386,10 @@ struct rcu_state {
+ /* in jiffies. */
+ unsigned long jiffies_stall; /* Time at which to check */
+ /* for CPU stalls. */
++ int nr_fqs_jiffies_stall; /* Number of fqs loops after
++ * which jiffies is read and
++ * jiffies_stall is set. Stall
++ * warnings disabled if !0. */
+ unsigned long jiffies_resched; /* Time at which to resched */
+ /* a reluctant CPU. */
+ unsigned long n_force_qs_gpstart; /* Snapshot of n_force_qs at */
+diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
+index b10b8349bb2a4..a2fa6b22e2488 100644
+--- a/kernel/rcu/tree_stall.h
++++ b/kernel/rcu/tree_stall.h
+@@ -149,12 +149,17 @@ static void panic_on_rcu_stall(void)
+ /**
+ * rcu_cpu_stall_reset - restart stall-warning timeout for current grace period
+ *
++ * To perform the reset request from the caller, disable stall detection until
++ * 3 fqs loops have passed. This is required to ensure a fresh jiffies is
++ * loaded. It should be safe to do from the fqs loop as enough timer
++ * interrupts and context switches should have passed.
++ *
+ * The caller must disable hard irqs.
+ */
+ void rcu_cpu_stall_reset(void)
+ {
+- WRITE_ONCE(rcu_state.jiffies_stall,
+- jiffies + rcu_jiffies_till_stall_check());
++ WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, 3);
++ WRITE_ONCE(rcu_state.jiffies_stall, ULONG_MAX);
+ }
+
+ //////////////////////////////////////////////////////////////////////////////
+@@ -170,6 +175,7 @@ static void record_gp_stall_check_time(void)
+ WRITE_ONCE(rcu_state.gp_start, j);
+ j1 = rcu_jiffies_till_stall_check();
+ smp_mb(); // ->gp_start before ->jiffies_stall and caller's ->gp_seq.
++ WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, 0);
+ WRITE_ONCE(rcu_state.jiffies_stall, j + j1);
+ rcu_state.jiffies_resched = j + j1 / 2;
+ rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
+@@ -725,6 +731,16 @@ static void check_cpu_stall(struct rcu_data *rdp)
+ !rcu_gp_in_progress())
+ return;
+ rcu_stall_kick_kthreads();
++
++ /*
++ * Check if it was requested (via rcu_cpu_stall_reset()) that the FQS
++ * loop has to set jiffies to ensure a non-stale jiffies value. This
++ * is required to have a good jiffies value after coming out of long
++ * breaks in jiffies updates. Not doing so can cause false positives.
++ */
++ if (READ_ONCE(rcu_state.nr_fqs_jiffies_stall) > 0)
++ return;
++
+ j = jiffies;
+
+ /*
+diff --git a/kernel/reboot.c b/kernel/reboot.c
+index 3bba88c7ffc6b..6ebef11c88760 100644
+--- a/kernel/reboot.c
++++ b/kernel/reboot.c
+@@ -74,6 +74,7 @@ void __weak (*pm_power_off)(void);
+ void emergency_restart(void)
+ {
+ kmsg_dump(KMSG_DUMP_EMERG);
++ system_state = SYSTEM_RESTART;
+ machine_emergency_restart();
+ }
+ EXPORT_SYMBOL_GPL(emergency_restart);
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 149fdb212e20f..460bf0e643842 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -5377,8 +5377,6 @@ context_switch(struct rq *rq, struct task_struct *prev,
+ /* switch_mm_cid() requires the memory barriers above. */
+ switch_mm_cid(rq, prev, next);
+
+- rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
+-
+ prepare_lock_switch(rq, next, rf);
+
+ /* Here we just switch the register state and the stack. */
+@@ -6634,6 +6632,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
+ /* Promote REQ to ACT */
+ rq->clock_update_flags <<= 1;
+ update_rq_clock(rq);
++ rq->clock_update_flags = RQCF_UPDATED;
+
+ switch_count = &prev->nivcsw;
+
+@@ -6713,8 +6712,6 @@ static void __sched notrace __schedule(unsigned int sched_mode)
+ /* Also unlocks the rq: */
+ rq = context_switch(rq, prev, next, &rf);
+ } else {
+- rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
+-
+ rq_unpin_lock(rq, &rf);
+ __balance_callbacks(rq);
+ raw_spin_rq_unlock_irq(rq);
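A runnable demonstration of the clock_update_flags handling the sched/core.c hunks consolidate: __schedule() promotes RQCF_REQ_SKIP to RQCF_ACT_SKIP, calls update_rq_clock(), and the added assignment collapses the state to RQCF_UPDATED, which is why the two per-branch clearing sites could be dropped. The flag values below are illustrative, not quoted from kernel/sched/sched.h.

#include <stdio.h>

#define RQCF_REQ_SKIP	0x01	/* illustrative values */
#define RQCF_ACT_SKIP	0x02
#define RQCF_UPDATED	0x04

int main(void)
{
	unsigned int flags = RQCF_REQ_SKIP;

	flags <<= 1;	/* "Promote REQ to ACT", as in __schedule() */
	printf("after promote: %#x (ACT_SKIP=%d)\n",
	       flags, !!(flags & RQCF_ACT_SKIP));
	/* update_rq_clock() would honor ACT_SKIP here ... */
	flags = RQCF_UPDATED;	/* the assignment added by the patch */
	printf("after update:  %#x\n", flags);
	return 0;
}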
+diff --git a/kernel/smp.c b/kernel/smp.c
+index 385179dae360e..0a9a3262f7822 100644
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -168,6 +168,8 @@ static DEFINE_PER_CPU(void *, cur_csd_info);
+
+ static ulong csd_lock_timeout = 5000; /* CSD lock timeout in milliseconds. */
+ module_param(csd_lock_timeout, ulong, 0444);
++static int panic_on_ipistall; /* CSD panic timeout in milliseconds, 300000 for five minutes. */
++module_param(panic_on_ipistall, int, 0444);
+
+ static atomic_t csd_bug_count = ATOMIC_INIT(0);
+
+@@ -228,6 +230,7 @@ static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *
+ }
+
+ ts2 = sched_clock();
++	/* How long since we last checked for a stuck CSD lock. */
+ ts_delta = ts2 - *ts1;
+ if (likely(ts_delta <= csd_lock_timeout_ns || csd_lock_timeout_ns == 0))
+ return false;
+@@ -241,9 +244,17 @@ static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *
+ else
+ cpux = cpu;
+ cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux)); /* Before func and info. */
++ /* How long since this CSD lock was stuck. */
++ ts_delta = ts2 - ts0;
+ pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %llu ns for CPU#%02d %pS(%ps).\n",
+- firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), ts2 - ts0,
++ firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), ts_delta,
+ cpu, csd->func, csd->info);
++ /*
++ * If the CSD lock is still stuck after 5 minutes, it is unlikely
++ * to become unstuck. Use a signed comparison to avoid triggering
++ * on underflows when the TSC is out of sync between sockets.
++ */
++ BUG_ON(panic_on_ipistall > 0 && (s64)ts_delta > ((s64)panic_on_ipistall * NSEC_PER_MSEC));
+ if (cpu_cur_csd && csd != cpu_cur_csd) {
+ pr_alert("\tcsd: CSD lock (#%d) handling prior %pS(%ps) request.\n",
+ *bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)),
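The signed comparison in the BUG_ON() above is the subtle part. A userspace check of both directions, using the real NSEC_PER_MSEC value; everything else is a stand-in.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_MSEC 1000000LL

static int should_panic(uint64_t ts0, uint64_t ts2, int panic_on_ipistall)
{
	uint64_t ts_delta = ts2 - ts0;	/* wraps if ts2 < ts0 */

	return panic_on_ipistall > 0 &&
	       (int64_t)ts_delta > (int64_t)panic_on_ipistall * NSEC_PER_MSEC;
}

int main(void)
{
	/* Stuck for six minutes with a five-minute limit: panic. */
	printf("%d\n", should_panic(0, 360ULL * 1000 * NSEC_PER_MSEC, 300000));
	/* Cross-socket skew: ts2 < ts0, the delta underflows to a huge
	 * unsigned value, and the signed compare correctly says no. */
	printf("%d\n", should_panic(1000, 10, 300000));
	return 0;
}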
+diff --git a/kernel/torture.c b/kernel/torture.c
+index 1a0519b836ac9..cd299ccc4e5d5 100644
+--- a/kernel/torture.c
++++ b/kernel/torture.c
+@@ -37,6 +37,7 @@
+ #include <linux/ktime.h>
+ #include <asm/byteorder.h>
+ #include <linux/torture.h>
++#include <linux/sched/rt.h>
+ #include "rcu/rcu.h"
+
+ MODULE_LICENSE("GPL");
+@@ -83,14 +84,15 @@ EXPORT_SYMBOL_GPL(verbose_torout_sleep);
+ * nanosecond random fuzz. This function and its friends desynchronize
+ * testing from the timer wheel.
+ */
+-int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, struct torture_random_state *trsp)
++int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, const enum hrtimer_mode mode,
++ struct torture_random_state *trsp)
+ {
+ ktime_t hto = baset_ns;
+
+ if (trsp)
+ hto += (torture_random(trsp) >> 3) % fuzzt_ns;
+- set_current_state(TASK_UNINTERRUPTIBLE);
+- return schedule_hrtimeout(&hto, HRTIMER_MODE_REL);
++ set_current_state(TASK_IDLE);
++ return schedule_hrtimeout(&hto, mode);
+ }
+ EXPORT_SYMBOL_GPL(torture_hrtimeout_ns);
+
+@@ -102,7 +104,7 @@ int torture_hrtimeout_us(u32 baset_us, u32 fuzzt_ns, struct torture_random_state
+ {
+ ktime_t baset_ns = baset_us * NSEC_PER_USEC;
+
+- return torture_hrtimeout_ns(baset_ns, fuzzt_ns, trsp);
++ return torture_hrtimeout_ns(baset_ns, fuzzt_ns, HRTIMER_MODE_REL, trsp);
+ }
+ EXPORT_SYMBOL_GPL(torture_hrtimeout_us);
+
+@@ -119,7 +121,7 @@ int torture_hrtimeout_ms(u32 baset_ms, u32 fuzzt_us, struct torture_random_state
+ fuzzt_ns = (u32)~0U;
+ else
+ fuzzt_ns = fuzzt_us * NSEC_PER_USEC;
+- return torture_hrtimeout_ns(baset_ns, fuzzt_ns, trsp);
++ return torture_hrtimeout_ns(baset_ns, fuzzt_ns, HRTIMER_MODE_REL, trsp);
+ }
+ EXPORT_SYMBOL_GPL(torture_hrtimeout_ms);
+
+@@ -132,7 +134,7 @@ int torture_hrtimeout_jiffies(u32 baset_j, struct torture_random_state *trsp)
+ {
+ ktime_t baset_ns = jiffies_to_nsecs(baset_j);
+
+- return torture_hrtimeout_ns(baset_ns, jiffies_to_nsecs(1), trsp);
++ return torture_hrtimeout_ns(baset_ns, jiffies_to_nsecs(1), HRTIMER_MODE_REL, trsp);
+ }
+ EXPORT_SYMBOL_GPL(torture_hrtimeout_jiffies);
+
+@@ -149,7 +151,7 @@ int torture_hrtimeout_s(u32 baset_s, u32 fuzzt_ms, struct torture_random_state *
+ fuzzt_ns = (u32)~0U;
+ else
+ fuzzt_ns = fuzzt_ms * NSEC_PER_MSEC;
+- return torture_hrtimeout_ns(baset_ns, fuzzt_ns, trsp);
++ return torture_hrtimeout_ns(baset_ns, fuzzt_ns, HRTIMER_MODE_REL, trsp);
+ }
+ EXPORT_SYMBOL_GPL(torture_hrtimeout_s);
+
+@@ -711,7 +713,7 @@ static void torture_shutdown_cleanup(void)
+ * suddenly applied to or removed from the system.
+ */
+ static struct task_struct *stutter_task;
+-static int stutter_pause_test;
++static ktime_t stutter_till_abs_time;
+ static int stutter;
+ static int stutter_gap;
+
+@@ -721,30 +723,16 @@ static int stutter_gap;
+ */
+ bool stutter_wait(const char *title)
+ {
+- unsigned int i = 0;
+ bool ret = false;
+- int spt;
++ ktime_t till_ns;
+
+ cond_resched_tasks_rcu_qs();
+- spt = READ_ONCE(stutter_pause_test);
+- for (; spt; spt = READ_ONCE(stutter_pause_test)) {
+- if (!ret) {
+- sched_set_normal(current, MAX_NICE);
+- ret = true;
+- }
+- if (spt == 1) {
+- schedule_timeout_interruptible(1);
+- } else if (spt == 2) {
+- while (READ_ONCE(stutter_pause_test)) {
+- if (!(i++ & 0xffff))
+- torture_hrtimeout_us(10, 0, NULL);
+- cond_resched();
+- }
+- } else {
+- schedule_timeout_interruptible(round_jiffies_relative(HZ));
+- }
+- torture_shutdown_absorb(title);
++ till_ns = READ_ONCE(stutter_till_abs_time);
++ if (till_ns && ktime_before(ktime_get(), till_ns)) {
++ torture_hrtimeout_ns(till_ns, 0, HRTIMER_MODE_ABS, NULL);
++ ret = true;
+ }
++ torture_shutdown_absorb(title);
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(stutter_wait);
+@@ -755,23 +743,16 @@ EXPORT_SYMBOL_GPL(stutter_wait);
+ */
+ static int torture_stutter(void *arg)
+ {
+- DEFINE_TORTURE_RANDOM(rand);
+- int wtime;
++ ktime_t till_ns;
+
+ VERBOSE_TOROUT_STRING("torture_stutter task started");
+ do {
+ if (!torture_must_stop() && stutter > 1) {
+- wtime = stutter;
+- if (stutter > 2) {
+- WRITE_ONCE(stutter_pause_test, 1);
+- wtime = stutter - 3;
+- torture_hrtimeout_jiffies(wtime, &rand);
+- wtime = 2;
+- }
+- WRITE_ONCE(stutter_pause_test, 2);
+- torture_hrtimeout_jiffies(wtime, NULL);
++ till_ns = ktime_add_ns(ktime_get(),
++ jiffies_to_nsecs(stutter));
++ WRITE_ONCE(stutter_till_abs_time, till_ns);
++ torture_hrtimeout_jiffies(stutter - 1, NULL);
+ }
+- WRITE_ONCE(stutter_pause_test, 0);
+ if (!torture_must_stop())
+ torture_hrtimeout_jiffies(stutter_gap, NULL);
+ torture_shutdown_absorb("torture_stutter");
+@@ -926,7 +907,7 @@ EXPORT_SYMBOL_GPL(torture_kthread_stopping);
+ * it starts, you will need to open-code your own.
+ */
+ int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
+- char *f, struct task_struct **tp)
++ char *f, struct task_struct **tp, void (*cbf)(struct task_struct *tp))
+ {
+ int ret = 0;
+
+@@ -938,6 +919,10 @@ int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
+ *tp = NULL;
+ return ret;
+ }
++
++ if (cbf)
++ cbf(*tp);
++
+ wake_up_process(*tp); // Process is sleeping, so ordering provided.
+ torture_shuffle_task_register(*tp);
+ return ret;
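A POSIX sketch of the reworked stutter scheme: the controller publishes one absolute deadline and every waiter sleeps until it with an absolute timer, replacing the polled stutter_pause_test flag. This is an analogue of the kernel logic, not the kernel code.

#define _POSIX_C_SOURCE 200112L
#include <stdio.h>
#include <time.h>

static struct timespec stutter_till;	/* stands in for stutter_till_abs_time */

static void torture_stutter(long pause_ms)	/* controller side */
{
	clock_gettime(CLOCK_MONOTONIC, &stutter_till);
	stutter_till.tv_sec += pause_ms / 1000;
	stutter_till.tv_nsec += (pause_ms % 1000) * 1000000L;
	if (stutter_till.tv_nsec >= 1000000000L) {
		stutter_till.tv_sec++;
		stutter_till.tv_nsec -= 1000000000L;
	}
}

static int stutter_wait(void)	/* waiter side */
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	if (now.tv_sec > stutter_till.tv_sec ||
	    (now.tv_sec == stutter_till.tv_sec &&
	     now.tv_nsec >= stutter_till.tv_nsec))
		return 0;	/* deadline already passed: no pause */
	/* Absolute-mode sleep, as with HRTIMER_MODE_ABS in the patch. */
	clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &stutter_till, NULL);
	return 1;
}

int main(void)
{
	torture_stutter(50);
	printf("paused: %d\n", stutter_wait());
	return 0;
}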
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 81c4dade3763e..fa8bdedc7a067 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -5000,6 +5000,20 @@ int tracing_open_file_tr(struct inode *inode, struct file *filp)
+ if (ret)
+ return ret;
+
++ mutex_lock(&event_mutex);
++
++ /* Fail if the file is marked for removal */
++ if (file->flags & EVENT_FILE_FL_FREED) {
++ trace_array_put(file->tr);
++ ret = -ENODEV;
++ } else {
++ event_file_get(file);
++ }
++
++ mutex_unlock(&event_mutex);
++ if (ret)
++ return ret;
++
+ filp->private_data = inode->i_private;
+
+ return 0;
+@@ -5010,6 +5024,7 @@ int tracing_release_file_tr(struct inode *inode, struct file *filp)
+ struct trace_event_file *file = inode->i_private;
+
+ trace_array_put(file->tr);
++ event_file_put(file);
+
+ return 0;
+ }
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index c98c3f42c3862..2e4717a741857 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -1656,6 +1656,9 @@ extern void event_trigger_unregister(struct event_command *cmd_ops,
+ char *glob,
+ struct event_trigger_data *trigger_data);
+
++extern void event_file_get(struct trace_event_file *file);
++extern void event_file_put(struct trace_event_file *file);
++
+ /**
+ * struct event_trigger_ops - callbacks for trace event triggers
+ *
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 9841589b4af7f..2a9058c5068b5 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -990,26 +990,38 @@ static void remove_subsystem(struct trace_subsystem_dir *dir)
+ }
+ }
+
+-static void remove_event_file_dir(struct trace_event_file *file)
++void event_file_get(struct trace_event_file *file)
+ {
+- struct dentry *dir = file->dir;
+- struct dentry *child;
++ atomic_inc(&file->ref);
++}
+
+- if (dir) {
+- spin_lock(&dir->d_lock); /* probably unneeded */
+- list_for_each_entry(child, &dir->d_subdirs, d_child) {
+- if (d_really_is_positive(child)) /* probably unneeded */
+- d_inode(child)->i_private = NULL;
+- }
+- spin_unlock(&dir->d_lock);
++void event_file_put(struct trace_event_file *file)
++{
++ if (WARN_ON_ONCE(!atomic_read(&file->ref))) {
++ if (file->flags & EVENT_FILE_FL_FREED)
++ kmem_cache_free(file_cachep, file);
++ return;
++ }
+
+- tracefs_remove(dir);
++ if (atomic_dec_and_test(&file->ref)) {
++ /* Count should only go to zero when it is freed */
++ if (WARN_ON_ONCE(!(file->flags & EVENT_FILE_FL_FREED)))
++ return;
++ kmem_cache_free(file_cachep, file);
+ }
++}
++
++static void remove_event_file_dir(struct trace_event_file *file)
++{
++ struct dentry *dir = file->dir;
++
++ tracefs_remove(dir);
+
+ list_del(&file->list);
+ remove_subsystem(file->system);
+ free_event_filter(file->filter);
+- kmem_cache_free(file_cachep, file);
++ file->flags |= EVENT_FILE_FL_FREED;
++ event_file_put(file);
+ }
+
+ /*
+@@ -1382,7 +1394,7 @@ event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
+ flags = file->flags;
+ mutex_unlock(&event_mutex);
+
+- if (!file)
++ if (!file || flags & EVENT_FILE_FL_FREED)
+ return -ENODEV;
+
+ if (flags & EVENT_FILE_FL_ENABLED &&
+@@ -1420,7 +1432,7 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ ret = -ENODEV;
+ mutex_lock(&event_mutex);
+ file = event_file_data(filp);
+- if (likely(file))
++ if (likely(file && !(file->flags & EVENT_FILE_FL_FREED)))
+ ret = ftrace_event_enable_disable(file, val);
+ mutex_unlock(&event_mutex);
+ break;
+@@ -1694,7 +1706,7 @@ event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
+
+ mutex_lock(&event_mutex);
+ file = event_file_data(filp);
+- if (file)
++ if (file && !(file->flags & EVENT_FILE_FL_FREED))
+ print_event_filter(file, s);
+ mutex_unlock(&event_mutex);
+
+@@ -2810,6 +2822,7 @@ trace_create_new_event(struct trace_event_call *call,
+ atomic_set(&file->tm_ref, 0);
+ INIT_LIST_HEAD(&file->triggers);
+ list_add(&file->list, &tr->events);
++ event_file_get(file);
+
+ return file;
+ }
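The trace_events.c hunks introduce a small reference-count protocol for trace_event_file: creation holds a reference, removal marks the file EVENT_FILE_FL_FREED and drops that reference, and opens refuse files already marked freed. A simplified userspace model of those lifetime rules; the struct is a stand-in.

#include <stdio.h>
#include <stdlib.h>

#define EVENT_FILE_FL_FREED	(1 << 0)

struct event_file {
	int ref;
	unsigned int flags;
};

static void event_file_get(struct event_file *f)
{
	f->ref++;
}

static void event_file_put(struct event_file *f)
{
	/* The count may only reach zero once the file is marked freed. */
	if (--f->ref == 0 && (f->flags & EVENT_FILE_FL_FREED))
		free(f);
}

static int open_file(struct event_file *f)
{
	if (f->flags & EVENT_FILE_FL_FREED)
		return -1;	/* -ENODEV in the patch */
	event_file_get(f);
	return 0;
}

int main(void)
{
	struct event_file *f = calloc(1, sizeof(*f));

	event_file_get(f);	/* trace_create_new_event() */
	if (!open_file(f))	/* tracing_open_file_tr() */
		printf("opened, ref=%d\n", f->ref);
	f->flags |= EVENT_FILE_FL_FREED;	/* remove_event_file_dir() */
	event_file_put(f);	/* ... which also drops its reference */
	event_file_put(f);	/* tracing_release_file_tr(): frees it */
	return 0;
}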
+diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
+index 1dad64267878c..5e2a422a58303 100644
+--- a/kernel/trace/trace_events_filter.c
++++ b/kernel/trace/trace_events_filter.c
+@@ -2088,6 +2088,9 @@ int apply_event_filter(struct trace_event_file *file, char *filter_string)
+ struct event_filter *filter = NULL;
+ int err;
+
++ if (file->flags & EVENT_FILE_FL_FREED)
++ return -ENODEV;
++
+ if (!strcmp(strstrip(filter_string), "0")) {
+ filter_disable(file);
+ filter = event_filter(file);
+diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
+index 14cb275a0bab0..846e02c0fb59a 100644
+--- a/kernel/trace/trace_events_synth.c
++++ b/kernel/trace/trace_events_synth.c
+@@ -452,7 +452,7 @@ static unsigned int trace_string(struct synth_trace_event *entry,
+
+ #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+ if ((unsigned long)str_val < TASK_SIZE)
+- ret = strncpy_from_user_nofault(str_field, str_val, STR_VAR_LEN_MAX);
++ ret = strncpy_from_user_nofault(str_field, (const void __user *)str_val, STR_VAR_LEN_MAX);
+ else
+ #endif
+ ret = strncpy_from_kernel_nofault(str_field, str_val, STR_VAR_LEN_MAX);
+diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c
+index d0b6b390ee423..778b4056700ff 100644
+--- a/kernel/watch_queue.c
++++ b/kernel/watch_queue.c
+@@ -331,7 +331,7 @@ long watch_queue_set_filter(struct pipe_inode_info *pipe,
+ filter.__reserved != 0)
+ return -EINVAL;
+
+- tf = memdup_user(_filter->filters, filter.nr_filters * sizeof(*tf));
++ tf = memdup_array_user(_filter->filters, filter.nr_filters, sizeof(*tf));
+ if (IS_ERR(tf))
+ return PTR_ERR(tf);
+
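What memdup_array_user() buys over the open-coded multiplication is an overflow check before anything is allocated, so a huge nr_filters cannot wrap into a short copy. A userspace sketch of that check; the kernel helper returns ERR_PTR(-EOVERFLOW) rather than NULL.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *memdup_array(const void *src, size_t n, size_t size)
{
	void *p;

	if (size && n > SIZE_MAX / size)	/* n * size would overflow */
		return NULL;
	p = malloc(n * size);
	if (p)
		memcpy(p, src, n * size);
	return p;
}

int main(void)
{
	int filters[4] = { 1, 2, 3, 4 };
	int *copy = memdup_array(filters, 4, sizeof(*filters));

	printf("%s\n", copy ? "copied" : "overflow");
	printf("%s\n", memdup_array(filters, SIZE_MAX, 2) ? "copied"
							  : "overflow");
	free(copy);
	return 0;
}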
+diff --git a/kernel/watchdog.c b/kernel/watchdog.c
+index d145305d95fe8..5cd6d4e269157 100644
+--- a/kernel/watchdog.c
++++ b/kernel/watchdog.c
+@@ -283,6 +283,13 @@ static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
+ static DEFINE_PER_CPU(bool, softlockup_touch_sync);
+ static unsigned long soft_lockup_nmi_warn;
+
++static int __init softlockup_panic_setup(char *str)
++{
++ softlockup_panic = simple_strtoul(str, NULL, 0);
++ return 1;
++}
++__setup("softlockup_panic=", softlockup_panic_setup);
++
+ static int __init nowatchdog_setup(char *str)
+ {
+ watchdog_user_enabled = 0;
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index e4a37d7a6752d..a7fcb25417726 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -5571,50 +5571,54 @@ static void work_for_cpu_fn(struct work_struct *work)
+ }
+
+ /**
+- * work_on_cpu - run a function in thread context on a particular cpu
++ * work_on_cpu_key - run a function in thread context on a particular cpu
+ * @cpu: the cpu to run on
+ * @fn: the function to run
+ * @arg: the function arg
++ * @key: The lock class key for lock debugging purposes
+ *
+ * It is up to the caller to ensure that the cpu doesn't go offline.
+ * The caller must not hold any locks which would prevent @fn from completing.
+ *
+ * Return: The value @fn returns.
+ */
+-long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
++long work_on_cpu_key(int cpu, long (*fn)(void *),
++ void *arg, struct lock_class_key *key)
+ {
+ struct work_for_cpu wfc = { .fn = fn, .arg = arg };
+
+- INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
++ INIT_WORK_ONSTACK_KEY(&wfc.work, work_for_cpu_fn, key);
+ schedule_work_on(cpu, &wfc.work);
+ flush_work(&wfc.work);
+ destroy_work_on_stack(&wfc.work);
+ return wfc.ret;
+ }
+-EXPORT_SYMBOL_GPL(work_on_cpu);
++EXPORT_SYMBOL_GPL(work_on_cpu_key);
+
+ /**
+- * work_on_cpu_safe - run a function in thread context on a particular cpu
++ * work_on_cpu_safe_key - run a function in thread context on a particular cpu
+ * @cpu: the cpu to run on
+ * @fn: the function to run
+ * @arg: the function argument
++ * @key: The lock class key for lock debugging purposes
+ *
+ * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
+ * any locks which would prevent @fn from completing.
+ *
+ * Return: The value @fn returns.
+ */
+-long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
++long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
++ void *arg, struct lock_class_key *key)
+ {
+ long ret = -ENODEV;
+
+ cpus_read_lock();
+ if (cpu_online(cpu))
+- ret = work_on_cpu(cpu, fn, arg);
++ ret = work_on_cpu_key(cpu, fn, arg, key);
+ cpus_read_unlock();
+ return ret;
+ }
+-EXPORT_SYMBOL_GPL(work_on_cpu_safe);
++EXPORT_SYMBOL_GPL(work_on_cpu_safe_key);
+ #endif /* CONFIG_SMP */
+
+ #ifdef CONFIG_FREEZER
+diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c
+index f25eb111c0516..7dfa88282b006 100644
+--- a/lib/generic-radix-tree.c
++++ b/lib/generic-radix-tree.c
+@@ -166,6 +166,10 @@ void *__genradix_iter_peek(struct genradix_iter *iter,
+ struct genradix_root *r;
+ struct genradix_node *n;
+ unsigned level, i;
++
++ if (iter->offset == SIZE_MAX)
++ return NULL;
++
+ restart:
+ r = READ_ONCE(radix->root);
+ if (!r)
+@@ -184,10 +188,17 @@ restart:
+ (GENRADIX_ARY - 1);
+
+ while (!n->children[i]) {
++ size_t objs_per_ptr = genradix_depth_size(level);
++
++ if (iter->offset + objs_per_ptr < iter->offset) {
++ iter->offset = SIZE_MAX;
++ iter->pos = SIZE_MAX;
++ return NULL;
++ }
++
+ i++;
+- iter->offset = round_down(iter->offset +
+- genradix_depth_size(level),
+- genradix_depth_size(level));
++ iter->offset = round_down(iter->offset + objs_per_ptr,
++ objs_per_ptr);
+ iter->pos = (iter->offset >> PAGE_SHIFT) *
+ objs_per_page;
+ if (i == GENRADIX_ARY)
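The genradix guard above catches size_t wraparound when the iterator steps over a whole missing subtree. A runnable demonstration of the check; round_down() is expanded by hand here, which is equivalent for the power-of-two strides genradix uses.

#include <stdint.h>
#include <stdio.h>

static size_t advance(size_t offset, size_t objs_per_ptr)
{
	if (offset + objs_per_ptr < offset)	/* unsigned wraparound */
		return SIZE_MAX;	/* park the iterator: done */
	/* round_down(offset + objs_per_ptr, objs_per_ptr) */
	return (offset + objs_per_ptr) / objs_per_ptr * objs_per_ptr;
}

int main(void)
{
	printf("%zu\n", advance(4096, 4096));		/* normal step */
	printf("%zu\n", advance(SIZE_MAX - 10, 4096));	/* would wrap */
	return 0;
}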
+diff --git a/mm/cma.c b/mm/cma.c
+index a4cfe995e11e7..f0c9d73ddb658 100644
+--- a/mm/cma.c
++++ b/mm/cma.c
+@@ -501,7 +501,7 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
+ */
+ if (page) {
+ for (i = 0; i < count; i++)
+- page_kasan_tag_reset(page + i);
++ page_kasan_tag_reset(nth_page(page, i));
+ }
+
+ if (ret && !no_warn) {
+diff --git a/mm/damon/core.c b/mm/damon/core.c
+index eb9580942a5c3..747af25ba9109 100644
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -476,20 +476,14 @@ static unsigned int damon_age_for_new_attrs(unsigned int age,
+ static unsigned int damon_accesses_bp_to_nr_accesses(
+ unsigned int accesses_bp, struct damon_attrs *attrs)
+ {
+- unsigned int max_nr_accesses =
+- attrs->aggr_interval / attrs->sample_interval;
+-
+- return accesses_bp * max_nr_accesses / 10000;
++ return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
+ }
+
+ /* convert nr_accesses to access ratio in bp (per 10,000) */
+ static unsigned int damon_nr_accesses_to_accesses_bp(
+ unsigned int nr_accesses, struct damon_attrs *attrs)
+ {
+- unsigned int max_nr_accesses =
+- attrs->aggr_interval / attrs->sample_interval;
+-
+- return nr_accesses * 10000 / max_nr_accesses;
++ return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
+ }
+
+ static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
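These DAMON hunks replace the open-coded aggr/sample ratio with damon_max_nr_accesses(), which this patch defines elsewhere (not in this section). A minimal userspace equivalent, assuming the helper simply centralizes that ratio:

#include <stdio.h>

struct damon_attrs {
	unsigned long sample_interval;	/* usecs between access checks */
	unsigned long aggr_interval;	/* usecs between aggregations */
};

/* Assumed shape of the factored-out helper. */
static unsigned int damon_max_nr_accesses(const struct damon_attrs *attrs)
{
	return attrs->aggr_interval / attrs->sample_interval;
}

/* accesses_bp (basis points, per 10,000) -> nr_accesses, as above */
static unsigned int bp_to_nr(unsigned int bp, const struct damon_attrs *a)
{
	return bp * damon_max_nr_accesses(a) / 10000;
}

int main(void)
{
	struct damon_attrs a = {
		.sample_interval = 5000,
		.aggr_interval = 100000,
	};

	/* max = 20 accesses per aggregation; 50% of that is 10. */
	printf("max=%u half=%u\n", damon_max_nr_accesses(&a),
	       bp_to_nr(5000, &a));
	return 0;
}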
+diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c
+index 7b8fce2f67a8d..3071e08e8b8f8 100644
+--- a/mm/damon/lru_sort.c
++++ b/mm/damon/lru_sort.c
+@@ -193,9 +193,7 @@ static int damon_lru_sort_apply_parameters(void)
+ if (err)
+ return err;
+
+- /* aggr_interval / sample_interval is the maximum nr_accesses */
+- hot_thres = damon_lru_sort_mon_attrs.aggr_interval /
+- damon_lru_sort_mon_attrs.sample_interval *
++ hot_thres = damon_max_nr_accesses(&damon_lru_sort_mon_attrs) *
+ hot_thres_access_freq / 1000;
+ scheme = damon_lru_sort_new_hot_scheme(hot_thres);
+ if (!scheme)
+diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c
+index e940802a15a41..5f27ba598350f 100644
+--- a/mm/damon/ops-common.c
++++ b/mm/damon/ops-common.c
+@@ -73,7 +73,6 @@ void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr
+ int damon_hot_score(struct damon_ctx *c, struct damon_region *r,
+ struct damos *s)
+ {
+- unsigned int max_nr_accesses;
+ int freq_subscore;
+ unsigned int age_in_sec;
+ int age_in_log, age_subscore;
+@@ -81,8 +80,8 @@ int damon_hot_score(struct damon_ctx *c, struct damon_region *r,
+ unsigned int age_weight = s->quota.weight_age;
+ int hotness;
+
+- max_nr_accesses = c->attrs.aggr_interval / c->attrs.sample_interval;
+- freq_subscore = r->nr_accesses * DAMON_MAX_SUBSCORE / max_nr_accesses;
++ freq_subscore = r->nr_accesses * DAMON_MAX_SUBSCORE /
++ damon_max_nr_accesses(&c->attrs);
+
+ age_in_sec = (unsigned long)r->age * c->attrs.aggr_interval / 1000000;
+ for (age_in_log = 0; age_in_log < DAMON_MAX_AGE_IN_LOG && age_in_sec;
+diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c
+index 50cf89dcd898b..ebc77c1baab96 100644
+--- a/mm/damon/sysfs-schemes.c
++++ b/mm/damon/sysfs-schemes.c
+@@ -125,6 +125,9 @@ damon_sysfs_scheme_regions_alloc(void)
+ struct damon_sysfs_scheme_regions *regions = kmalloc(sizeof(*regions),
+ GFP_KERNEL);
+
++ if (!regions)
++ return NULL;
++
+ regions->kobj = (struct kobject){};
+ INIT_LIST_HEAD(&regions->regions_list);
+ regions->nr_regions = 0;
+@@ -1649,6 +1652,8 @@ static int damon_sysfs_before_damos_apply(struct damon_ctx *ctx,
+
+ sysfs_regions = sysfs_schemes->schemes_arr[schemes_idx]->tried_regions;
+ region = damon_sysfs_scheme_region_alloc(r);
++ if (!region)
++ return 0;
+ list_add_tail(&region->list, &sysfs_regions->regions_list);
+ sysfs_regions->nr_regions++;
+ if (kobject_init_and_add(&region->kobj,
+diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
+index df165820c6054..e25dd9ba81876 100644
+--- a/mm/damon/sysfs.c
++++ b/mm/damon/sysfs.c
+@@ -1144,58 +1144,75 @@ destroy_targets_out:
+ return err;
+ }
+
+-/*
+- * Search a target in a context that corresponds to the sysfs target input.
+- *
+- * Return: pointer to the target if found, NULL if not found, or negative
+- * error code if the search failed.
+- */
+-static struct damon_target *damon_sysfs_existing_target(
+- struct damon_sysfs_target *sys_target, struct damon_ctx *ctx)
++static int damon_sysfs_update_target_pid(struct damon_target *target, int pid)
+ {
+- struct pid *pid;
+- struct damon_target *t;
++ struct pid *pid_new;
+
+- if (!damon_target_has_pid(ctx)) {
+- /* Up to only one target for paddr could exist */
+- damon_for_each_target(t, ctx)
+- return t;
+- return NULL;
++ pid_new = find_get_pid(pid);
++ if (!pid_new)
++ return -EINVAL;
++
++ if (pid_new == target->pid) {
++ put_pid(pid_new);
++ return 0;
+ }
+
+- /* ops.id should be DAMON_OPS_VADDR or DAMON_OPS_FVADDR */
+- pid = find_get_pid(sys_target->pid);
+- if (!pid)
+- return ERR_PTR(-EINVAL);
+- damon_for_each_target(t, ctx) {
+- if (t->pid == pid) {
+- put_pid(pid);
+- return t;
+- }
++ put_pid(target->pid);
++ target->pid = pid_new;
++ return 0;
++}
++
++static int damon_sysfs_update_target(struct damon_target *target,
++ struct damon_ctx *ctx,
++ struct damon_sysfs_target *sys_target)
++{
++ int err;
++
++ if (damon_target_has_pid(ctx)) {
++ err = damon_sysfs_update_target_pid(target, sys_target->pid);
++ if (err)
++ return err;
+ }
+- put_pid(pid);
+- return NULL;
++
++ /*
++ * Do monitoring target region boundary update only if one or more
++	 * regions are set by the user. This keeps the current monitoring
++	 * results and target ranges intact, especially for ops like 'vaddr'
++	 * that update monitoring target regions dynamically.
++ */
++ if (sys_target->regions->nr)
++ err = damon_sysfs_set_regions(target, sys_target->regions);
++ return err;
+ }
+
+ static int damon_sysfs_set_targets(struct damon_ctx *ctx,
+ struct damon_sysfs_targets *sysfs_targets)
+ {
+- int i, err;
++ struct damon_target *t, *next;
++ int i = 0, err;
+
+ /* Multiple physical address space monitoring targets makes no sense */
+ if (ctx->ops.id == DAMON_OPS_PADDR && sysfs_targets->nr > 1)
+ return -EINVAL;
+
+- for (i = 0; i < sysfs_targets->nr; i++) {
++ damon_for_each_target_safe(t, next, ctx) {
++ if (i < sysfs_targets->nr) {
++ err = damon_sysfs_update_target(t, ctx,
++ sysfs_targets->targets_arr[i]);
++ if (err)
++ return err;
++ } else {
++ if (damon_target_has_pid(ctx))
++ put_pid(t->pid);
++ damon_destroy_target(t);
++ }
++ i++;
++ }
++
++ for (; i < sysfs_targets->nr; i++) {
+ struct damon_sysfs_target *st = sysfs_targets->targets_arr[i];
+- struct damon_target *t = damon_sysfs_existing_target(st, ctx);
+-
+- if (IS_ERR(t))
+- return PTR_ERR(t);
+- if (!t)
+- err = damon_sysfs_add_target(st, ctx);
+- else
+- err = damon_sysfs_set_regions(t, st->regions);
++
++ err = damon_sysfs_add_target(st, ctx);
+ if (err)
+ return err;
+ }
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 164d22365bdee..dfffd1df12a1d 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2759,13 +2759,15 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
+ int nr = folio_nr_pages(folio);
+
+ xas_split(&xas, folio, folio_order(folio));
+- if (folio_test_swapbacked(folio)) {
+- __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS,
+- -nr);
+- } else {
+- __lruvec_stat_mod_folio(folio, NR_FILE_THPS,
+- -nr);
+- filemap_nr_thps_dec(mapping);
++ if (folio_test_pmd_mappable(folio)) {
++ if (folio_test_swapbacked(folio)) {
++ __lruvec_stat_mod_folio(folio,
++ NR_SHMEM_THPS, -nr);
++ } else {
++ __lruvec_stat_mod_folio(folio,
++ NR_FILE_THPS, -nr);
++ filemap_nr_thps_dec(mapping);
++ }
+ }
+ }
+
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 097b81c37597e..9951fb7412cc7 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -6521,13 +6521,7 @@ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
+ struct page *page = NULL;
+ spinlock_t *ptl;
+ pte_t *pte, entry;
+-
+- /*
+- * FOLL_PIN is not supported for follow_page(). Ordinary GUP goes via
+- * follow_hugetlb_page().
+- */
+- if (WARN_ON_ONCE(flags & FOLL_PIN))
+- return NULL;
++ int ret;
+
+ hugetlb_vma_lock_read(vma);
+ pte = hugetlb_walk(vma, haddr, huge_page_size(h));
+@@ -6537,8 +6531,23 @@ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
+ ptl = huge_pte_lock(h, mm, pte);
+ entry = huge_ptep_get(pte);
+ if (pte_present(entry)) {
+- page = pte_page(entry) +
+- ((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
++ page = pte_page(entry);
++
++ if (!huge_pte_write(entry)) {
++ if (flags & FOLL_WRITE) {
++ page = NULL;
++ goto out;
++ }
++
++ if (gup_must_unshare(vma, flags, page)) {
++ /* Tell the caller to do unsharing */
++ page = ERR_PTR(-EMLINK);
++ goto out;
++ }
++ }
++
++ page = nth_page(page, ((address & ~huge_page_mask(h)) >> PAGE_SHIFT));
++
+ /*
+ * Note that page may be a sub-page, and with vmemmap
+ * optimizations the page struct may be read only.
+@@ -6548,8 +6557,10 @@ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
+ * try_grab_page() should always be able to get the page here,
+ * because we hold the ptl lock and have verified pte_present().
+ */
+- if (try_grab_page(page, flags)) {
+- page = NULL;
++ ret = try_grab_page(page, flags);
++
++ if (WARN_ON_ONCE(ret)) {
++ page = ERR_PTR(ret);
+ goto out;
+ }
+ }
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 339dd2ccc9333..9f0c38c409cea 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -2868,7 +2868,8 @@ static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
+ * Moreover, it should not come from DMA buffer and is not readily
+ * reclaimable. So those GFP bits should be masked off.
+ */
+-#define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
++#define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | \
++ __GFP_ACCOUNT | __GFP_NOFAIL)
+
+ /*
+ * mod_objcg_mlstate() may be called with irq enabled, so
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index 3f231cf1b4106..dea98fc562c16 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1586,7 +1586,7 @@ static int scan_movable_pages(unsigned long start, unsigned long end,
+ */
+ if (HPageMigratable(head))
+ goto found;
+- skip = compound_nr(head) - (page - head);
++ skip = compound_nr(head) - (pfn - page_to_pfn(head));
+ pfn += skip - 1;
+ }
+ return -ENOENT;
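A userspace illustration of why the memory_hotplug.c fix computes the skip from pfns rather than struct page pointers: with SPARSEMEM and no VMEMMAP the page array is not virtually contiguous across sections, so pointer subtraction can disagree with the pfn distance. The numbers below are made up.

#include <stdio.h>

int main(void)
{
	unsigned long head_pfn = 0x100000;	/* first pfn of the folio */
	unsigned long pfn = 0x100040;		/* pfn being scanned */
	unsigned long nr = 512;			/* compound_nr(): 2MB / 4K */

	/* Patched form: pure pfn arithmetic, valid in every memory model. */
	unsigned long skip = nr - (pfn - head_pfn);

	printf("skip %lu pages, resume at pfn %#lx\n", skip, pfn + skip);
	return 0;
}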
+diff --git a/net/9p/client.c b/net/9p/client.c
+index b0e7cb7e1a54a..e265a0ca6bddd 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -1981,7 +1981,7 @@ struct p9_fid *p9_client_xattrwalk(struct p9_fid *file_fid,
+ goto error;
+ }
+ p9_debug(P9_DEBUG_9P,
+- ">>> TXATTRWALK file_fid %d, attr_fid %d name %s\n",
++ ">>> TXATTRWALK file_fid %d, attr_fid %d name '%s'\n",
+ file_fid->fid, attr_fid->fid, attr_name);
+
+ req = p9_client_rpc(clnt, P9_TXATTRWALK, "dds",
+diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
+index 00b684616e8d9..9374790f17ce4 100644
+--- a/net/9p/trans_fd.c
++++ b/net/9p/trans_fd.c
+@@ -832,14 +832,21 @@ static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
+ goto out_free_ts;
+ if (!(ts->rd->f_mode & FMODE_READ))
+ goto out_put_rd;
+- /* prevent workers from hanging on IO when fd is a pipe */
+- ts->rd->f_flags |= O_NONBLOCK;
++ /* Prevent workers from hanging on IO when fd is a pipe.
++ * It's technically possible for userspace or concurrent mounts to
++ * modify this flag concurrently, which will likely result in a
++ * broken filesystem. However, just having bad flags here should
++ * not crash the kernel or cause any other sort of bug, so mark this
++ * particular data race as intentional so that tooling (like KCSAN)
++ * can allow it and detect further problems.
++ */
++ data_race(ts->rd->f_flags |= O_NONBLOCK);
+ ts->wr = fget(wfd);
+ if (!ts->wr)
+ goto out_put_rd;
+ if (!(ts->wr->f_mode & FMODE_WRITE))
+ goto out_put_wr;
+- ts->wr->f_flags |= O_NONBLOCK;
++ data_race(ts->wr->f_flags |= O_NONBLOCK);
+
+ client->trans = ts;
+ client->status = Connected;
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 4e03642488230..c090627ff9751 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -172,13 +172,11 @@ static void hci_conn_cleanup(struct hci_conn *conn)
+ hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
+ }
+
+- hci_conn_del_sysfs(conn);
+-
+ debugfs_remove_recursive(conn->debugfs);
+
+- hci_dev_put(hdev);
++ hci_conn_del_sysfs(conn);
+
+- hci_conn_put(conn);
++ hci_dev_put(hdev);
+ }
+
+ static void hci_acl_create_connection(struct hci_conn *conn)
+diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
+index 15b33579007cb..367e32fe30eb8 100644
+--- a/net/bluetooth/hci_sysfs.c
++++ b/net/bluetooth/hci_sysfs.c
+@@ -35,7 +35,7 @@ void hci_conn_init_sysfs(struct hci_conn *conn)
+ {
+ struct hci_dev *hdev = conn->hdev;
+
+- BT_DBG("conn %p", conn);
++ bt_dev_dbg(hdev, "conn %p", conn);
+
+ conn->dev.type = &bt_link;
+ conn->dev.class = &bt_class;
+@@ -48,27 +48,30 @@ void hci_conn_add_sysfs(struct hci_conn *conn)
+ {
+ struct hci_dev *hdev = conn->hdev;
+
+- BT_DBG("conn %p", conn);
++ bt_dev_dbg(hdev, "conn %p", conn);
+
+ if (device_is_registered(&conn->dev))
+ return;
+
+ dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
+
+- if (device_add(&conn->dev) < 0) {
++ if (device_add(&conn->dev) < 0)
+ bt_dev_err(hdev, "failed to register connection device");
+- return;
+- }
+-
+- hci_dev_hold(hdev);
+ }
+
+ void hci_conn_del_sysfs(struct hci_conn *conn)
+ {
+ struct hci_dev *hdev = conn->hdev;
+
+- if (!device_is_registered(&conn->dev))
++ bt_dev_dbg(hdev, "conn %p", conn);
++
++ if (!device_is_registered(&conn->dev)) {
++ /* If device_add() has *not* succeeded, use *only* put_device()
++ * to drop the reference count.
++ */
++ put_device(&conn->dev);
+ return;
++ }
+
+ while (1) {
+ struct device *dev;
+@@ -80,9 +83,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
+ put_device(dev);
+ }
+
+- device_del(&conn->dev);
+-
+- hci_dev_put(hdev);
++ device_unregister(&conn->dev);
+ }
+
+ static void bt_host_release(struct device *dev)
+diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c
+index 71056ee847736..0fcf357ea7ad3 100644
+--- a/net/bridge/netfilter/nf_conntrack_bridge.c
++++ b/net/bridge/netfilter/nf_conntrack_bridge.c
+@@ -37,7 +37,7 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk,
+ ktime_t tstamp = skb->tstamp;
+ struct ip_frag_state state;
+ struct iphdr *iph;
+- int err;
++ int err = 0;
+
+ /* for offloaded checksums cleanup checksum before fragmentation */
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+diff --git a/net/core/sock.c b/net/core/sock.c
+index eef27812013a4..6df04c705200a 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -600,7 +600,7 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
+ INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check,
+ dst, cookie) == NULL) {
+ sk_tx_queue_clear(sk);
+- sk->sk_dst_pending_confirm = 0;
++ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
+ RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
+ dst_release(dst);
+ return NULL;
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index 60cffabfd4788..c8c2704a320f1 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -731,12 +731,12 @@ int __inet_hash(struct sock *sk, struct sock *osk)
+ if (err)
+ goto unlock;
+ }
++ sock_set_flag(sk, SOCK_RCU_FREE);
+ if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
+ sk->sk_family == AF_INET6)
+ __sk_nulls_add_node_tail_rcu(sk, &ilb2->nulls_head);
+ else
+ __sk_nulls_add_node_rcu(sk, &ilb2->nulls_head);
+- sock_set_flag(sk, SOCK_RCU_FREE);
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+ unlock:
+ spin_unlock(&ilb2->lock);
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index afa819eede6a3..c2403fea8ec9a 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1316,7 +1316,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
+ skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
+ refcount_add(skb->truesize, &sk->sk_wmem_alloc);
+
+- skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);
++ skb_set_dst_pending_confirm(skb, READ_ONCE(sk->sk_dst_pending_confirm));
+
+ /* Build TCP header and checksum it. */
+ th = (struct tcphdr *)skb->data;
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 0e3a1753a51c6..715da615f0359 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -3121,6 +3121,10 @@ static int ieee80211_get_tx_power(struct wiphy *wiphy,
+ else
+ *dbm = sdata->vif.bss_conf.txpower;
+
++ /* INT_MIN indicates no power level was set yet */
++ if (*dbm == INT_MIN)
++ return -EINVAL;
++
+ return 0;
+ }
+
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 5692daf57a4d8..c5988c7f9e81c 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -1537,8 +1537,9 @@ void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list)
+ struct mptcp_pm_addr_entry *entry;
+
+ list_for_each_entry(entry, rm_list, list) {
+- remove_anno_list_by_saddr(msk, &entry->addr);
+- if (alist.nr < MPTCP_RM_IDS_MAX)
++ if ((remove_anno_list_by_saddr(msk, &entry->addr) ||
++ lookup_subflow_by_saddr(&msk->conn_list, &entry->addr)) &&
++ alist.nr < MPTCP_RM_IDS_MAX)
+ alist.ids[alist.nr++] = entry->addr.id;
+ }
+
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 636580c4736c9..974a096293d08 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -1233,6 +1233,8 @@ static void mptcp_update_infinite_map(struct mptcp_sock *msk,
+ mptcp_do_fallback(ssk);
+ }
+
++#define MPTCP_MAX_GSO_SIZE (GSO_LEGACY_MAX_SIZE - (MAX_TCP_HEADER + 1))
++
+ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
+ struct mptcp_data_frag *dfrag,
+ struct mptcp_sendmsg_info *info)
+@@ -1259,6 +1261,8 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
+ return -EAGAIN;
+
+ /* compute send limit */
++ if (unlikely(ssk->sk_gso_max_size > MPTCP_MAX_GSO_SIZE))
++ ssk->sk_gso_max_size = MPTCP_MAX_GSO_SIZE;
+ info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags);
+ copy = info->size_goal;
+
+diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
+index a3f1fe810cc96..7b878ea29e3a1 100644
+--- a/net/mptcp/sockopt.c
++++ b/net/mptcp/sockopt.c
+@@ -737,8 +737,11 @@ static int mptcp_setsockopt_v4_set_tos(struct mptcp_sock *msk, int optname,
+ val = inet_sk(sk)->tos;
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
++ bool slow;
+
++ slow = lock_sock_fast(ssk);
+ __ip_sock_set_tos(ssk, val);
++ unlock_sock_fast(ssk, slow);
+ }
+ release_sock(sk);
+
+diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c
+index f8854bff286cb..62fb1031763d1 100644
+--- a/net/ncsi/ncsi-aen.c
++++ b/net/ncsi/ncsi-aen.c
+@@ -89,11 +89,6 @@ static int ncsi_aen_handler_lsc(struct ncsi_dev_priv *ndp,
+ if ((had_link == has_link) || chained)
+ return 0;
+
+- if (had_link)
+- netif_carrier_off(ndp->ndev.dev);
+- else
+- netif_carrier_on(ndp->ndev.dev);
+-
+ if (!ndp->multi_package && !nc->package->multi_channel) {
+ if (had_link) {
+ ndp->flags |= NCSI_DEV_RESHUFFLE;
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 8776266ba1532..db582c8d25f00 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -6461,6 +6461,12 @@ static int nft_setelem_deactivate(const struct net *net,
+ return ret;
+ }
+
++static void nft_setelem_catchall_destroy(struct nft_set_elem_catchall *catchall)
++{
++ list_del_rcu(&catchall->list);
++ kfree_rcu(catchall, rcu);
++}
++
+ static void nft_setelem_catchall_remove(const struct net *net,
+ const struct nft_set *set,
+ const struct nft_set_elem *elem)
+@@ -6469,8 +6475,7 @@ static void nft_setelem_catchall_remove(const struct net *net,
+
+ list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
+ if (catchall->elem == elem->priv) {
+- list_del_rcu(&catchall->list);
+- kfree_rcu(catchall, rcu);
++ nft_setelem_catchall_destroy(catchall);
+ break;
+ }
+ }
+@@ -7202,10 +7207,11 @@ static int nf_tables_delsetelem(struct sk_buff *skb,
+
+ if (err < 0) {
+ NL_SET_BAD_ATTR(extack, attr);
+- break;
++ return err;
+ }
+ }
+- return err;
++
++ return 0;
+ }
+
+ /*
+@@ -9631,9 +9637,8 @@ void nft_trans_gc_queue_sync_done(struct nft_trans_gc *trans)
+ call_rcu(&trans->rcu, nft_trans_gc_trans_free);
+ }
+
+-static struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc,
+- unsigned int gc_seq,
+- bool sync)
++struct nft_trans_gc *nft_trans_gc_catchall_async(struct nft_trans_gc *gc,
++ unsigned int gc_seq)
+ {
+ struct nft_set_elem_catchall *catchall;
+ const struct nft_set *set = gc->set;
+@@ -9649,11 +9654,7 @@ static struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc,
+
+ nft_set_elem_dead(ext);
+ dead_elem:
+- if (sync)
+- gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
+- else
+- gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
+-
++ gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
+ if (!gc)
+ return NULL;
+
+@@ -9663,15 +9664,34 @@ dead_elem:
+ return gc;
+ }
+
+-struct nft_trans_gc *nft_trans_gc_catchall_async(struct nft_trans_gc *gc,
+- unsigned int gc_seq)
+-{
+- return nft_trans_gc_catchall(gc, gc_seq, false);
+-}
+-
+ struct nft_trans_gc *nft_trans_gc_catchall_sync(struct nft_trans_gc *gc)
+ {
+- return nft_trans_gc_catchall(gc, 0, true);
++ struct nft_set_elem_catchall *catchall, *next;
++ const struct nft_set *set = gc->set;
++ struct nft_set_elem elem;
++ struct nft_set_ext *ext;
++
++ WARN_ON_ONCE(!lockdep_commit_lock_is_held(gc->net));
++
++ list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
++ ext = nft_set_elem_ext(set, catchall->elem);
++
++ if (!nft_set_elem_expired(ext))
++ continue;
++
++ gc = nft_trans_gc_queue_sync(gc, GFP_KERNEL);
++ if (!gc)
++ return NULL;
++
++ memset(&elem, 0, sizeof(elem));
++ elem.priv = catchall->elem;
++
++ nft_setelem_data_deactivate(gc->net, gc->set, &elem);
++ nft_setelem_catchall_destroy(catchall);
++ nft_trans_gc_elem_add(gc, elem.priv);
++ }
++
++ return gc;
+ }
+
+ static void nf_tables_module_autoload_cleanup(struct net *net)
+diff --git a/net/netfilter/nft_byteorder.c b/net/netfilter/nft_byteorder.c
+index e596d1a842f70..f6e791a681015 100644
+--- a/net/netfilter/nft_byteorder.c
++++ b/net/netfilter/nft_byteorder.c
+@@ -38,13 +38,14 @@ void nft_byteorder_eval(const struct nft_expr *expr,
+
+ switch (priv->size) {
+ case 8: {
++ u64 *dst64 = (void *)dst;
+ u64 src64;
+
+ switch (priv->op) {
+ case NFT_BYTEORDER_NTOH:
+ for (i = 0; i < priv->len / 8; i++) {
+ src64 = nft_reg_load64(&src[i]);
+- nft_reg_store64(&dst[i],
++ nft_reg_store64(&dst64[i],
+ be64_to_cpu((__force __be64)src64));
+ }
+ break;
+@@ -52,7 +53,7 @@ void nft_byteorder_eval(const struct nft_expr *expr,
+ for (i = 0; i < priv->len / 8; i++) {
+ src64 = (__force __u64)
+ cpu_to_be64(nft_reg_load64(&src[i]));
+- nft_reg_store64(&dst[i], src64);
++ nft_reg_store64(&dst64[i], src64);
+ }
+ break;
+ }
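The nft_byteorder.c fix is about store stride: the destination registers are an array of 32-bit words, and 64-bit results must be written through an explicit 64-bit view instead of stepping a u32 pointer by 64-bit elements. A userspace demonstration; put_unaligned() is modeled with memcpy().

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

static void reg_store64(uint64_t *dst, uint64_t val)
{
	memcpy(dst, &val, sizeof(val));	/* alignment-safe store */
}

int main(void)
{
	uint32_t regs[4] = { 0 };
	uint64_t *dst64 = (void *)regs;	/* the cast added by the patch */

	for (int i = 0; i < 2; i++)
		reg_store64(&dst64[i], 0x1111111100000000ULL * (i + 1));
	/* With a u32 pointer, &dst[1] would sit only 4 bytes in, so the
	 * second 8-byte store would overlap the first -- the bug fixed
	 * above. The 64-bit view lands it at byte 8, i.e. regs[2..3]. */
	printf("%08" PRIx32 " %08" PRIx32 " %08" PRIx32 " %08" PRIx32 "\n",
	       regs[0], regs[1], regs[2], regs[3]);
	return 0;
}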
+diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
+index 8fdc7318c03c7..715484665a907 100644
+--- a/net/netfilter/nft_meta.c
++++ b/net/netfilter/nft_meta.c
+@@ -63,7 +63,7 @@ nft_meta_get_eval_time(enum nft_meta_keys key,
+ {
+ switch (key) {
+ case NFT_META_TIME_NS:
+- nft_reg_store64(dest, ktime_get_real_ns());
++ nft_reg_store64((u64 *)dest, ktime_get_real_ns());
+ break;
+ case NFT_META_TIME_DAY:
+ nft_reg_store8(dest, nft_meta_weekday());
+diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
+index d131750663c3c..ea05d0b2df68a 100644
+--- a/net/sched/act_ct.c
++++ b/net/sched/act_ct.c
+@@ -1534,6 +1534,9 @@ static int tcf_ct_offload_act_setup(struct tc_action *act, void *entry_data,
+ if (bind) {
+ struct flow_action_entry *entry = entry_data;
+
++ if (tcf_ct_helper(act))
++ return -EOPNOTSUPP;
++
+ entry->id = FLOW_ACTION_CT;
+ entry->ct.action = tcf_ct_action(act);
+ entry->ct.zone = tcf_ct_zone(act);
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 9fb0ccabc1a26..a148aa8003b88 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -111,7 +111,8 @@ static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
+
+ pipefs_sb = rpc_get_sb_net(net);
+ if (pipefs_sb) {
+- __rpc_clnt_remove_pipedir(clnt);
++ if (pipefs_sb == clnt->pipefs_sb)
++ __rpc_clnt_remove_pipedir(clnt);
+ rpc_put_sb_net(net);
+ }
+ }
+@@ -151,6 +152,8 @@ rpc_setup_pipedir(struct super_block *pipefs_sb, struct rpc_clnt *clnt)
+ {
+ struct dentry *dentry;
+
++ clnt->pipefs_sb = pipefs_sb;
++
+ if (clnt->cl_program->pipe_dir_name != NULL) {
+ dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt);
+ if (IS_ERR(dentry))
+@@ -2169,6 +2172,7 @@ call_connect_status(struct rpc_task *task)
+ task->tk_status = 0;
+ switch (status) {
+ case -ECONNREFUSED:
++ case -ECONNRESET:
+ /* A positive refusal suggests a rebind is needed. */
+ if (RPC_IS_SOFTCONN(task))
+ break;
+@@ -2177,7 +2181,6 @@ call_connect_status(struct rpc_task *task)
+ goto out_retry;
+ }
+ fallthrough;
+- case -ECONNRESET:
+ case -ECONNABORTED:
+ case -ENETDOWN:
+ case -ENETUNREACH:
+diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
+index 5988a5c5ff3f0..102c3818bc54d 100644
+--- a/net/sunrpc/rpcb_clnt.c
++++ b/net/sunrpc/rpcb_clnt.c
+@@ -769,6 +769,10 @@ void rpcb_getport_async(struct rpc_task *task)
+
+ child = rpcb_call_async(rpcb_clnt, map, proc);
+ rpc_release_client(rpcb_clnt);
++ if (IS_ERR(child)) {
++ /* rpcb_map_release() has freed the arguments */
++ return;
++ }
+
+ xprt->stat.bind_count++;
+ rpc_put_task(child);
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+index 85c8bcaebb80f..3b05f90a3e50d 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+@@ -852,7 +852,8 @@ out_readfail:
+ if (ret == -EINVAL)
+ svc_rdma_send_error(rdma_xprt, ctxt, ret);
+ svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
+- return ret;
++ svc_xprt_deferred_close(xprt);
++ return -ENOTCONN;
+
+ out_backchannel:
+ svc_rdma_handle_bc_reply(rqstp, ctxt);
+diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
+index 9b47c84092319..42d9586365ae3 100644
+--- a/net/tipc/netlink_compat.c
++++ b/net/tipc/netlink_compat.c
+@@ -102,6 +102,7 @@ static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
+ return -EMSGSIZE;
+
+ skb_put(skb, TLV_SPACE(len));
++ memset(tlv, 0, TLV_SPACE(len));
+ tlv->tlv_type = htons(type);
+ tlv->tlv_len = htons(TLV_LENGTH(len));
+ if (len && data)
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 3e8a04a136688..3e6eeacb13aec 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -2553,15 +2553,16 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
+
+ if (!(state->flags & MSG_PEEK))
+ WRITE_ONCE(u->oob_skb, NULL);
+-
++ else
++ skb_get(oob_skb);
+ unix_state_unlock(sk);
+
+ chunk = state->recv_actor(oob_skb, 0, chunk, state);
+
+- if (!(state->flags & MSG_PEEK)) {
++ if (!(state->flags & MSG_PEEK))
+ UNIXCB(oob_skb).consumed += 1;
+- kfree_skb(oob_skb);
+- }
++
++ consume_skb(oob_skb);
+
+ mutex_unlock(&u->iolock);
+
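A userspace model of the OOB skb lifetime fix above: a MSG_PEEK reader now takes its own reference before the lock is dropped, and both paths end with a reference drop rather than an unconditional free. The two-field skb below is a stand-in.

#include <stdio.h>
#include <stdlib.h>

struct skb {
	int users;
};

static void skb_get(struct skb *s)
{
	s->users++;
}

static void consume_skb(struct skb *s)	/* drop one reference */
{
	if (--s->users == 0)
		free(s);
}

static void recv_oob(struct skb **oob_skb, int peek)
{
	struct skb *s = *oob_skb;

	if (!peek)
		*oob_skb = NULL;	/* reader inherits the queue's ref */
	else
		skb_get(s);		/* the reference added by the patch */
	/* ... lock dropped, data copied to the caller here ... */
	consume_skb(s);
}

int main(void)
{
	struct skb *s = calloc(1, sizeof(*s));

	s->users = 1;		/* reference held by the socket queue */
	recv_oob(&s, 1);	/* MSG_PEEK: skb must survive the copy */
	printf("after peek: users=%d\n", s->users);
	recv_oob(&s, 0);	/* consuming read: skb is freed */
	return 0;
}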
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index 020cf17ab7e47..ccd8cefeea7ba 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -89,6 +89,7 @@
+ #include <linux/types.h>
+ #include <linux/bitops.h>
+ #include <linux/cred.h>
++#include <linux/errqueue.h>
+ #include <linux/init.h>
+ #include <linux/io.h>
+ #include <linux/kernel.h>
+@@ -110,6 +111,7 @@
+ #include <linux/workqueue.h>
+ #include <net/sock.h>
+ #include <net/af_vsock.h>
++#include <uapi/linux/vm_sockets.h>
+
+ static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
+ static void vsock_sk_destruct(struct sock *sk);
+@@ -2134,6 +2136,10 @@ vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int err;
+
+ sk = sock->sk;
++
++ if (unlikely(flags & MSG_ERRQUEUE))
++ return sock_recv_errqueue(sk, msg, len, SOL_VSOCK, VSOCK_RECVERR);
++
+ vsk = vsock_sk(sk);
+ err = 0;
+
+diff --git a/samples/bpf/syscall_tp_user.c b/samples/bpf/syscall_tp_user.c
+index 7a788bb837fc1..7a09ac74fac07 100644
+--- a/samples/bpf/syscall_tp_user.c
++++ b/samples/bpf/syscall_tp_user.c
+@@ -17,9 +17,9 @@
+
+ static void usage(const char *cmd)
+ {
+- printf("USAGE: %s [-i num_progs] [-h]\n", cmd);
+- printf(" -i num_progs # number of progs of the test\n");
+- printf(" -h # help\n");
++ printf("USAGE: %s [-i nr_tests] [-h]\n", cmd);
++ printf(" -i nr_tests # rounds of test to run\n");
++ printf(" -h # help\n");
+ }
+
+ static void verify_map(int map_id)
+@@ -45,14 +45,14 @@ static void verify_map(int map_id)
+ }
+ }
+
+-static int test(char *filename, int num_progs)
++static int test(char *filename, int nr_tests)
+ {
+- int map0_fds[num_progs], map1_fds[num_progs], fd, i, j = 0;
+- struct bpf_link *links[num_progs * 4];
+- struct bpf_object *objs[num_progs];
++ int map0_fds[nr_tests], map1_fds[nr_tests], fd, i, j = 0;
++ struct bpf_link **links = NULL;
++ struct bpf_object *objs[nr_tests];
+ struct bpf_program *prog;
+
+- for (i = 0; i < num_progs; i++) {
++ for (i = 0; i < nr_tests; i++) {
+ objs[i] = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(objs[i])) {
+ fprintf(stderr, "opening BPF object file failed\n");
+@@ -60,6 +60,19 @@ static int test(char *filename, int num_progs)
+ goto cleanup;
+ }
+
++ /* One-time initialization */
++ if (!links) {
++ int nr_progs = 0;
++
++ bpf_object__for_each_program(prog, objs[i])
++ nr_progs += 1;
++
++ links = calloc(nr_progs * nr_tests, sizeof(struct bpf_link *));
++
++ if (!links)
++ goto cleanup;
++ }
++
+ /* load BPF program */
+ if (bpf_object__load(objs[i])) {
+ fprintf(stderr, "loading BPF object file failed\n");
+@@ -101,14 +114,18 @@ static int test(char *filename, int num_progs)
+ close(fd);
+
+ /* verify the map */
+- for (i = 0; i < num_progs; i++) {
++ for (i = 0; i < nr_tests; i++) {
+ verify_map(map0_fds[i]);
+ verify_map(map1_fds[i]);
+ }
+
+ cleanup:
+- for (j--; j >= 0; j--)
+- bpf_link__destroy(links[j]);
++ if (links) {
++ for (j--; j >= 0; j--)
++ bpf_link__destroy(links[j]);
++
++ free(links);
++ }
+
+ for (i--; i >= 0; i--)
+ bpf_object__close(objs[i]);
+@@ -117,13 +134,13 @@ cleanup:
+
+ int main(int argc, char **argv)
+ {
+- int opt, num_progs = 1;
++ int opt, nr_tests = 1;
+ char filename[256];
+
+ while ((opt = getopt(argc, argv, "i:h")) != -1) {
+ switch (opt) {
+ case 'i':
+- num_progs = atoi(optarg);
++ nr_tests = atoi(optarg);
+ break;
+ case 'h':
+ default:
+@@ -134,5 +151,5 @@ int main(int argc, char **argv)
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+- return test(filename, num_progs);
++ return test(filename, nr_tests);
+ }
+diff --git a/scripts/Makefile.vmlinux b/scripts/Makefile.vmlinux
+index 3cd6ca15f390d..c9f3e03124d7f 100644
+--- a/scripts/Makefile.vmlinux
++++ b/scripts/Makefile.vmlinux
+@@ -19,6 +19,7 @@ quiet_cmd_cc_o_c = CC $@
+
+ ifdef CONFIG_MODULES
+ KASAN_SANITIZE_.vmlinux.export.o := n
++KCSAN_SANITIZE_.vmlinux.export.o := n
+ GCOV_PROFILE_.vmlinux.export.o := n
+ targets += .vmlinux.export.o
+ vmlinux: .vmlinux.export.o
+diff --git a/scripts/gcc-plugins/randomize_layout_plugin.c b/scripts/gcc-plugins/randomize_layout_plugin.c
+index 951b74ba1b242..910bd21d08f48 100644
+--- a/scripts/gcc-plugins/randomize_layout_plugin.c
++++ b/scripts/gcc-plugins/randomize_layout_plugin.c
+@@ -191,12 +191,14 @@ static void partition_struct(tree *fields, unsigned long length, struct partitio
+
+ static void performance_shuffle(tree *newtree, unsigned long length, ranctx *prng_state)
+ {
+- unsigned long i, x;
++ unsigned long i, x, index;
+ struct partition_group size_group[length];
+ unsigned long num_groups = 0;
+ unsigned long randnum;
+
+ partition_struct(newtree, length, (struct partition_group *)&size_group, &num_groups);
++
++ /* FIXME: this group shuffle is currently a no-op. */
+ for (i = num_groups - 1; i > 0; i--) {
+ struct partition_group tmp;
+ randnum = ranval(prng_state) % (i + 1);
+@@ -206,11 +208,14 @@ static void performance_shuffle(tree *newtree, unsigned long length, ranctx *prn
+ }
+
+ for (x = 0; x < num_groups; x++) {
+- for (i = size_group[x].start + size_group[x].length - 1; i > size_group[x].start; i--) {
++ for (index = size_group[x].length - 1; index > 0; index--) {
+ tree tmp;
++
++ i = size_group[x].start + index;
+ if (DECL_BIT_FIELD_TYPE(newtree[i]))
+ continue;
+- randnum = ranval(prng_state) % (i + 1);
++ randnum = ranval(prng_state) % (index + 1);
++ randnum += size_group[x].start;
+ // we could handle this case differently if desired
+ if (DECL_BIT_FIELD_TYPE(newtree[randnum]))
+ continue;
+@@ -273,8 +278,6 @@ static bool is_flexible_array(const_tree field)
+ {
+ const_tree fieldtype;
+ const_tree typesize;
+- const_tree elemtype;
+- const_tree elemsize;
+
+ fieldtype = TREE_TYPE(field);
+ typesize = TYPE_SIZE(fieldtype);
+@@ -282,20 +285,12 @@ static bool is_flexible_array(const_tree field)
+ if (TREE_CODE(fieldtype) != ARRAY_TYPE)
+ return false;
+
+- elemtype = TREE_TYPE(fieldtype);
+- elemsize = TYPE_SIZE(elemtype);
+-
+ /* size of type is represented in bits */
+
+ if (typesize == NULL_TREE && TYPE_DOMAIN(fieldtype) != NULL_TREE &&
+ TYPE_MAX_VALUE(TYPE_DOMAIN(fieldtype)) == NULL_TREE)
+ return true;
+
+- if (typesize != NULL_TREE &&
+- (TREE_CONSTANT(typesize) && (!tree_to_uhwi(typesize) ||
+- tree_to_uhwi(typesize) == tree_to_uhwi(elemsize))))
+- return true;
+-
+ return false;
+ }
+
+diff --git a/security/integrity/iint.c b/security/integrity/iint.c
+index a462df827de2d..27ea19fb1f54c 100644
+--- a/security/integrity/iint.c
++++ b/security/integrity/iint.c
+@@ -66,9 +66,32 @@ struct integrity_iint_cache *integrity_iint_find(struct inode *inode)
+ return iint;
+ }
+
+-static void iint_free(struct integrity_iint_cache *iint)
++#define IMA_MAX_NESTING (FILESYSTEM_MAX_STACK_DEPTH+1)
++
++/*
++ * It is not clear that IMA should be nested at all, but as long is it measures
++ * files both on overlayfs and on underlying fs, we need to annotate the iint
++ * mutex to avoid lockdep false positives related to IMA + overlayfs.
++ * See ovl_lockdep_annotate_inode_mutex_key() for more details.
++ */
++static inline void iint_lockdep_annotate(struct integrity_iint_cache *iint,
++ struct inode *inode)
++{
++#ifdef CONFIG_LOCKDEP
++ static struct lock_class_key iint_mutex_key[IMA_MAX_NESTING];
++
++ int depth = inode->i_sb->s_stack_depth;
++
++ if (WARN_ON_ONCE(depth < 0 || depth >= IMA_MAX_NESTING))
++ depth = 0;
++
++ lockdep_set_class(&iint->mutex, &iint_mutex_key[depth]);
++#endif
++}
++
++static void iint_init_always(struct integrity_iint_cache *iint,
++ struct inode *inode)
+ {
+- kfree(iint->ima_hash);
+ iint->ima_hash = NULL;
+ iint->version = 0;
+ iint->flags = 0UL;
+@@ -80,6 +103,14 @@ static void iint_free(struct integrity_iint_cache *iint)
+ iint->ima_creds_status = INTEGRITY_UNKNOWN;
+ iint->evm_status = INTEGRITY_UNKNOWN;
+ iint->measured_pcrs = 0;
++ mutex_init(&iint->mutex);
++ iint_lockdep_annotate(iint, inode);
++}
++
++static void iint_free(struct integrity_iint_cache *iint)
++{
++ kfree(iint->ima_hash);
++ mutex_destroy(&iint->mutex);
+ kmem_cache_free(iint_cache, iint);
+ }
+
+@@ -104,6 +135,8 @@ struct integrity_iint_cache *integrity_inode_get(struct inode *inode)
+ if (!iint)
+ return NULL;
+
++ iint_init_always(iint, inode);
++
+ write_lock(&integrity_iint_lock);
+
+ p = &integrity_iint_tree.rb_node;
+@@ -153,25 +186,18 @@ void integrity_inode_free(struct inode *inode)
+ iint_free(iint);
+ }
+
+-static void init_once(void *foo)
++static void iint_init_once(void *foo)
+ {
+ struct integrity_iint_cache *iint = (struct integrity_iint_cache *) foo;
+
+ memset(iint, 0, sizeof(*iint));
+- iint->ima_file_status = INTEGRITY_UNKNOWN;
+- iint->ima_mmap_status = INTEGRITY_UNKNOWN;
+- iint->ima_bprm_status = INTEGRITY_UNKNOWN;
+- iint->ima_read_status = INTEGRITY_UNKNOWN;
+- iint->ima_creds_status = INTEGRITY_UNKNOWN;
+- iint->evm_status = INTEGRITY_UNKNOWN;
+- mutex_init(&iint->mutex);
+ }
+
+ static int __init integrity_iintcache_init(void)
+ {
+ iint_cache =
+ kmem_cache_create("iint_cache", sizeof(struct integrity_iint_cache),
+- 0, SLAB_PANIC, init_once);
++ 0, SLAB_PANIC, iint_init_once);
+ return 0;
+ }
+ DEFINE_LSM(integrity) = {
+diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
+index 452e80b541e54..597ea0c4d72f7 100644
+--- a/security/integrity/ima/ima_api.c
++++ b/security/integrity/ima/ima_api.c
+@@ -243,6 +243,7 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
+ {
+ const char *audit_cause = "failed";
+ struct inode *inode = file_inode(file);
++ struct inode *real_inode = d_real_inode(file_dentry(file));
+ const char *filename = file->f_path.dentry->d_name.name;
+ struct ima_max_digest_data hash;
+ struct kstat stat;
+@@ -302,6 +303,10 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
+ iint->ima_hash = tmpbuf;
+ memcpy(iint->ima_hash, &hash, length);
+ iint->version = i_version;
++ if (real_inode != inode) {
++ iint->real_ino = real_inode->i_ino;
++ iint->real_dev = real_inode->i_sb->s_dev;
++ }
+
+ /* Possibly temporary failure due to type of read (eg. O_DIRECT) */
+ if (!result)
+diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
+index 365db0e43d7c2..cc1217ac2c6fa 100644
+--- a/security/integrity/ima/ima_main.c
++++ b/security/integrity/ima/ima_main.c
+@@ -25,6 +25,7 @@
+ #include <linux/xattr.h>
+ #include <linux/ima.h>
+ #include <linux/fs.h>
++#include <linux/iversion.h>
+
+ #include "ima.h"
+
+@@ -207,7 +208,7 @@ static int process_measurement(struct file *file, const struct cred *cred,
+ u32 secid, char *buf, loff_t size, int mask,
+ enum ima_hooks func)
+ {
+- struct inode *inode = file_inode(file);
++ struct inode *backing_inode, *inode = file_inode(file);
+ struct integrity_iint_cache *iint = NULL;
+ struct ima_template_desc *template_desc = NULL;
+ char *pathbuf = NULL;
+@@ -284,6 +285,19 @@ static int process_measurement(struct file *file, const struct cred *cred,
+ iint->measured_pcrs = 0;
+ }
+
++ /* Detect and re-evaluate changes made to the backing file. */
++ backing_inode = d_real_inode(file_dentry(file));
++ if (backing_inode != inode &&
++ (action & IMA_DO_MASK) && (iint->flags & IMA_DONE_MASK)) {
++ if (!IS_I_VERSION(backing_inode) ||
++ backing_inode->i_sb->s_dev != iint->real_dev ||
++ backing_inode->i_ino != iint->real_ino ||
++ !inode_eq_iversion(backing_inode, iint->version)) {
++ iint->flags &= ~IMA_DONE_MASK;
++ iint->measured_pcrs = 0;
++ }
++ }
++
+ /* Determine if already appraised/measured based on bitmask
+ * (IMA_MEASURE, IMA_MEASURED, IMA_XXXX_APPRAISE, IMA_XXXX_APPRAISED,
+ * IMA_AUDIT, IMA_AUDITED)
+diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
+index 7167a6e99bdc0..52c3c806bf69f 100644
+--- a/security/integrity/integrity.h
++++ b/security/integrity/integrity.h
+@@ -164,6 +164,8 @@ struct integrity_iint_cache {
+ unsigned long flags;
+ unsigned long measured_pcrs;
+ unsigned long atomic_flags;
++ unsigned long real_ino;
++ dev_t real_dev;
+ enum integrity_status ima_file_status:4;
+ enum integrity_status ima_mmap_status:4;
+ enum integrity_status ima_bprm_status:4;
+diff --git a/security/keys/trusted-keys/trusted_core.c b/security/keys/trusted-keys/trusted_core.c
+index 85fb5c22529a7..fee1ab2c734d3 100644
+--- a/security/keys/trusted-keys/trusted_core.c
++++ b/security/keys/trusted-keys/trusted_core.c
+@@ -358,17 +358,17 @@ static int __init init_trusted(void)
+ if (!get_random)
+ get_random = kernel_get_random;
+
+- static_call_update(trusted_key_seal,
+- trusted_key_sources[i].ops->seal);
+- static_call_update(trusted_key_unseal,
+- trusted_key_sources[i].ops->unseal);
+- static_call_update(trusted_key_get_random,
+- get_random);
+- trusted_key_exit = trusted_key_sources[i].ops->exit;
+- migratable = trusted_key_sources[i].ops->migratable;
+-
+ ret = trusted_key_sources[i].ops->init();
+- if (!ret)
++ if (!ret) {
++ static_call_update(trusted_key_seal, trusted_key_sources[i].ops->seal);
++ static_call_update(trusted_key_unseal, trusted_key_sources[i].ops->unseal);
++ static_call_update(trusted_key_get_random, get_random);
++
++ trusted_key_exit = trusted_key_sources[i].ops->exit;
++ migratable = trusted_key_sources[i].ops->migratable;
++ }
++
++ if (!ret || ret != -ENODEV)
+ break;
+ }
+
+diff --git a/security/keys/trusted-keys/trusted_tee.c b/security/keys/trusted-keys/trusted_tee.c
+index ac3e270ade69b..aa3d477de6db5 100644
+--- a/security/keys/trusted-keys/trusted_tee.c
++++ b/security/keys/trusted-keys/trusted_tee.c
+@@ -65,24 +65,16 @@ static int trusted_tee_seal(struct trusted_key_payload *p, char *datablob)
+ int ret;
+ struct tee_ioctl_invoke_arg inv_arg;
+ struct tee_param param[4];
+- struct tee_shm *reg_shm_in = NULL, *reg_shm_out = NULL;
++ struct tee_shm *reg_shm = NULL;
+
+ memset(&inv_arg, 0, sizeof(inv_arg));
+ memset(&param, 0, sizeof(param));
+
+- reg_shm_in = tee_shm_register_kernel_buf(pvt_data.ctx, p->key,
+- p->key_len);
+- if (IS_ERR(reg_shm_in)) {
+- dev_err(pvt_data.dev, "key shm register failed\n");
+- return PTR_ERR(reg_shm_in);
+- }
+-
+- reg_shm_out = tee_shm_register_kernel_buf(pvt_data.ctx, p->blob,
+- sizeof(p->blob));
+- if (IS_ERR(reg_shm_out)) {
+- dev_err(pvt_data.dev, "blob shm register failed\n");
+- ret = PTR_ERR(reg_shm_out);
+- goto out;
++ reg_shm = tee_shm_register_kernel_buf(pvt_data.ctx, p->key,
++ sizeof(p->key) + sizeof(p->blob));
++ if (IS_ERR(reg_shm)) {
++ dev_err(pvt_data.dev, "shm register failed\n");
++ return PTR_ERR(reg_shm);
+ }
+
+ inv_arg.func = TA_CMD_SEAL;
+@@ -90,13 +82,13 @@ static int trusted_tee_seal(struct trusted_key_payload *p, char *datablob)
+ inv_arg.num_params = 4;
+
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+- param[0].u.memref.shm = reg_shm_in;
++ param[0].u.memref.shm = reg_shm;
+ param[0].u.memref.size = p->key_len;
+ param[0].u.memref.shm_offs = 0;
+ param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+- param[1].u.memref.shm = reg_shm_out;
++ param[1].u.memref.shm = reg_shm;
+ param[1].u.memref.size = sizeof(p->blob);
+- param[1].u.memref.shm_offs = 0;
++ param[1].u.memref.shm_offs = sizeof(p->key);
+
+ ret = tee_client_invoke_func(pvt_data.ctx, &inv_arg, param);
+ if ((ret < 0) || (inv_arg.ret != 0)) {
+@@ -107,11 +99,7 @@ static int trusted_tee_seal(struct trusted_key_payload *p, char *datablob)
+ p->blob_len = param[1].u.memref.size;
+ }
+
+-out:
+- if (reg_shm_out)
+- tee_shm_free(reg_shm_out);
+- if (reg_shm_in)
+- tee_shm_free(reg_shm_in);
++ tee_shm_free(reg_shm);
+
+ return ret;
+ }
+@@ -124,24 +112,16 @@ static int trusted_tee_unseal(struct trusted_key_payload *p, char *datablob)
+ int ret;
+ struct tee_ioctl_invoke_arg inv_arg;
+ struct tee_param param[4];
+- struct tee_shm *reg_shm_in = NULL, *reg_shm_out = NULL;
++ struct tee_shm *reg_shm = NULL;
+
+ memset(&inv_arg, 0, sizeof(inv_arg));
+ memset(&param, 0, sizeof(param));
+
+- reg_shm_in = tee_shm_register_kernel_buf(pvt_data.ctx, p->blob,
+- p->blob_len);
+- if (IS_ERR(reg_shm_in)) {
+- dev_err(pvt_data.dev, "blob shm register failed\n");
+- return PTR_ERR(reg_shm_in);
+- }
+-
+- reg_shm_out = tee_shm_register_kernel_buf(pvt_data.ctx, p->key,
+- sizeof(p->key));
+- if (IS_ERR(reg_shm_out)) {
+- dev_err(pvt_data.dev, "key shm register failed\n");
+- ret = PTR_ERR(reg_shm_out);
+- goto out;
++ reg_shm = tee_shm_register_kernel_buf(pvt_data.ctx, p->key,
++ sizeof(p->key) + sizeof(p->blob));
++ if (IS_ERR(reg_shm)) {
++ dev_err(pvt_data.dev, "shm register failed\n");
++ return PTR_ERR(reg_shm);
+ }
+
+ inv_arg.func = TA_CMD_UNSEAL;
+@@ -149,11 +129,11 @@ static int trusted_tee_unseal(struct trusted_key_payload *p, char *datablob)
+ inv_arg.num_params = 4;
+
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+- param[0].u.memref.shm = reg_shm_in;
++ param[0].u.memref.shm = reg_shm;
+ param[0].u.memref.size = p->blob_len;
+- param[0].u.memref.shm_offs = 0;
++ param[0].u.memref.shm_offs = sizeof(p->key);
+ param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+- param[1].u.memref.shm = reg_shm_out;
++ param[1].u.memref.shm = reg_shm;
+ param[1].u.memref.size = sizeof(p->key);
+ param[1].u.memref.shm_offs = 0;
+
+@@ -166,11 +146,7 @@ static int trusted_tee_unseal(struct trusted_key_payload *p, char *datablob)
+ p->key_len = param[1].u.memref.size;
+ }
+
+-out:
+- if (reg_shm_out)
+- tee_shm_free(reg_shm_out);
+- if (reg_shm_in)
+- tee_shm_free(reg_shm_in);
++ tee_shm_free(reg_shm);
+
+ return ret;
+ }
+diff --git a/sound/core/info.c b/sound/core/info.c
+index 0b2f04dcb5897..e2f302e55bbb2 100644
+--- a/sound/core/info.c
++++ b/sound/core/info.c
+@@ -56,7 +56,7 @@ struct snd_info_private_data {
+ };
+
+ static int snd_info_version_init(void);
+-static void snd_info_disconnect(struct snd_info_entry *entry);
++static void snd_info_clear_entries(struct snd_info_entry *entry);
+
+ /*
+
+@@ -569,11 +569,16 @@ void snd_info_card_disconnect(struct snd_card *card)
+ {
+ if (!card)
+ return;
+- mutex_lock(&info_mutex);
++
+ proc_remove(card->proc_root_link);
+- card->proc_root_link = NULL;
+ if (card->proc_root)
+- snd_info_disconnect(card->proc_root);
++ proc_remove(card->proc_root->p);
++
++ mutex_lock(&info_mutex);
++ if (card->proc_root)
++ snd_info_clear_entries(card->proc_root);
++ card->proc_root_link = NULL;
++ card->proc_root = NULL;
+ mutex_unlock(&info_mutex);
+ }
+
+@@ -745,15 +750,14 @@ struct snd_info_entry *snd_info_create_card_entry(struct snd_card *card,
+ }
+ EXPORT_SYMBOL(snd_info_create_card_entry);
+
+-static void snd_info_disconnect(struct snd_info_entry *entry)
++static void snd_info_clear_entries(struct snd_info_entry *entry)
+ {
+ struct snd_info_entry *p;
+
+ if (!entry->p)
+ return;
+ list_for_each_entry(p, &entry->children, list)
+- snd_info_disconnect(p);
+- proc_remove(entry->p);
++ snd_info_clear_entries(p);
+ entry->p = NULL;
+ }
+
+@@ -770,8 +774,9 @@ void snd_info_free_entry(struct snd_info_entry * entry)
+ if (!entry)
+ return;
+ if (entry->p) {
++ proc_remove(entry->p);
+ mutex_lock(&info_mutex);
+- snd_info_disconnect(entry);
++ snd_info_clear_entries(entry);
+ mutex_unlock(&info_mutex);
+ }
+
+diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c
+index 2633a4bb1d85d..214a0680524b0 100644
+--- a/sound/hda/hdac_stream.c
++++ b/sound/hda/hdac_stream.c
+@@ -354,8 +354,10 @@ struct hdac_stream *snd_hdac_stream_assign(struct hdac_bus *bus,
+ struct hdac_stream *res = NULL;
+
+ /* make a non-zero unique key for the substream */
+- int key = (substream->pcm->device << 16) | (substream->number << 2) |
+- (substream->stream + 1);
++ int key = (substream->number << 2) | (substream->stream + 1);
++
++ if (substream->pcm)
++ key |= (substream->pcm->device << 16);
+
+ spin_lock_irq(&bus->reg_lock);
+ list_for_each_entry(azx_dev, &bus->stream_list, list) {
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 7f1d79f450a2a..7375998538040 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9670,6 +9670,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x103c, 0x88d0, "HP Pavilion 15-eh1xxx (mainboard 88D0)", ALC287_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8902, "HP OMEN 16", ALC285_FIXUP_HP_MUTE_LED),
++ SND_PCI_QUIRK(0x103c, 0x890e, "HP 255 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ SND_PCI_QUIRK(0x103c, 0x8919, "HP Pavilion Aero Laptop 13-be0xxx", ALC287_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x896d, "HP ZBook Firefly 16 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x896e, "HP EliteBook x360 830 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+@@ -9705,6 +9706,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x8abb, "HP ZBook Firefly 14 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8ad1, "HP EliteBook 840 14 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8ad2, "HP EliteBook 860 16 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8b2f, "HP 255 15.6 inch G10 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ SND_PCI_QUIRK(0x103c, 0x8b42, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8b43, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8b44, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+@@ -9738,12 +9740,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x8c70, "HP EliteBook 835 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8c71, "HP EliteBook 845 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8ca4, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8ca7, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8cf5, "HP ZBook Studio 16", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+ SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x1043, 0x10a1, "ASUS UX391UA", ALC294_FIXUP_ASUS_SPK),
+ SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x1043, 0x10d3, "ASUS K6500ZC", ALC294_FIXUP_ASUS_SPK),
+ SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+@@ -9798,6 +9804,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
+ SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402ZA", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x16a3, "ASUS UX3402VA", ALC245_FIXUP_CS35L41_SPI_2),
++ SND_PCI_QUIRK(0x1043, 0x1f62, "ASUS UX7602ZM", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
+ SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
+@@ -10639,22 +10646,6 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ {0x12, 0x90a60130},
+ {0x17, 0x90170110},
+ {0x21, 0x03211020}),
+- SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
+- {0x14, 0x90170110},
+- {0x21, 0x04211020}),
+- SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
+- {0x14, 0x90170110},
+- {0x21, 0x04211030}),
+- SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+- ALC295_STANDARD_PINS,
+- {0x17, 0x21014020},
+- {0x18, 0x21a19030}),
+- SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+- ALC295_STANDARD_PINS,
+- {0x17, 0x21014040},
+- {0x18, 0x21a19050}),
+- SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+- ALC295_STANDARD_PINS),
+ SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC298_STANDARD_PINS,
+ {0x17, 0x90170110}),
+@@ -10698,6 +10689,9 @@ static const struct snd_hda_pin_quirk alc269_fallback_pin_fixup_tbl[] = {
+ SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
+ {0x19, 0x40000000},
+ {0x1b, 0x40000000}),
++ SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
++ {0x19, 0x40000000},
++ {0x1b, 0x40000000}),
+ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x19, 0x40000000},
+ {0x1a, 0x40000000}),
+diff --git a/sound/soc/codecs/lpass-wsa-macro.c b/sound/soc/codecs/lpass-wsa-macro.c
+index 8ba7dc89daaaa..5f106be184a87 100644
+--- a/sound/soc/codecs/lpass-wsa-macro.c
++++ b/sound/soc/codecs/lpass-wsa-macro.c
+@@ -1685,6 +1685,9 @@ static int wsa_macro_spk_boost_event(struct snd_soc_dapm_widget *w,
+ boost_path_cfg1 = CDC_WSA_RX1_RX_PATH_CFG1;
+ reg = CDC_WSA_RX1_RX_PATH_CTL;
+ reg_mix = CDC_WSA_RX1_RX_PATH_MIX_CTL;
++ } else {
++ dev_warn(component->dev, "Incorrect widget name in the driver\n");
++ return -EINVAL;
+ }
+
+ switch (event) {
+diff --git a/sound/soc/codecs/wsa883x.c b/sound/soc/codecs/wsa883x.c
+index e40d583a1ce64..4ea550b0601be 100644
+--- a/sound/soc/codecs/wsa883x.c
++++ b/sound/soc/codecs/wsa883x.c
+@@ -1203,9 +1203,6 @@ static int wsa883x_spkr_event(struct snd_soc_dapm_widget *w,
+ break;
+ }
+
+- snd_soc_component_write_field(component, WSA883X_DRE_CTL_1,
+- WSA883X_DRE_GAIN_EN_MASK,
+- WSA883X_DRE_GAIN_FROM_CSR);
+ if (wsa883x->port_enable[WSA883X_PORT_COMP])
+ snd_soc_component_write_field(component, WSA883X_DRE_CTL_0,
+ WSA883X_DRE_OFFSET_MASK,
+@@ -1218,9 +1215,6 @@ static int wsa883x_spkr_event(struct snd_soc_dapm_widget *w,
+ snd_soc_component_write_field(component, WSA883X_PDM_WD_CTL,
+ WSA883X_PDM_EN_MASK,
+ WSA883X_PDM_ENABLE);
+- snd_soc_component_write_field(component, WSA883X_PA_FSM_CTL,
+- WSA883X_GLOBAL_PA_EN_MASK,
+- WSA883X_GLOBAL_PA_ENABLE);
+
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+@@ -1346,6 +1340,7 @@ static const struct snd_soc_dai_ops wsa883x_dai_ops = {
+ .hw_free = wsa883x_hw_free,
+ .mute_stream = wsa883x_digital_mute,
+ .set_stream = wsa883x_set_sdw_stream,
++ .mute_unmute_on_trigger = true,
+ };
+
+ static struct snd_soc_dai_driver wsa883x_dais[] = {
+diff --git a/sound/soc/intel/common/soc-acpi-intel-cht-match.c b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
+index cdcbf04b8832f..5e2ec60e2954b 100644
+--- a/sound/soc/intel/common/soc-acpi-intel-cht-match.c
++++ b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
+@@ -75,6 +75,39 @@ static struct snd_soc_acpi_mach *cht_ess8316_quirk(void *arg)
+ return arg;
+ }
+
++/*
++ * The Lenovo Yoga Tab 3 Pro YT3-X90, with an Android factory OS, has a
++ * buggy DSDT with the codec not being listed at all.
++ */
++static const struct dmi_system_id lenovo_yoga_tab3_x90[] = {
++ {
++ /* Lenovo Yoga Tab 3 Pro YT3-X90, codec missing from DSDT */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
++ },
++ },
++ { }
++};
++
++static struct snd_soc_acpi_mach cht_lenovo_yoga_tab3_x90_mach = {
++ .id = "10WM5102",
++ .drv_name = "bytcr_wm5102",
++ .fw_filename = "intel/fw_sst_22a8.bin",
++ .board = "bytcr_wm5102",
++ .sof_tplg_filename = "sof-cht-wm5102.tplg",
++};
++
++static struct snd_soc_acpi_mach *lenovo_yt3_x90_quirk(void *arg)
++{
++ if (dmi_check_system(lenovo_yoga_tab3_x90))
++ return &cht_lenovo_yoga_tab3_x90_mach;
++
++ /* Skip wildcard match snd_soc_acpi_intel_cherrytrail_machines[] entry */
++ return NULL;
++}
++
+ static const struct snd_soc_acpi_codecs rt5640_comp_ids = {
+ .num_codecs = 2,
+ .codecs = { "10EC5640", "10EC3276" },
+@@ -175,6 +208,16 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[] = {
+ .drv_name = "sof_pcm512x",
+ .sof_tplg_filename = "sof-cht-src-50khz-pcm512x.tplg",
+ },
++ /*
++ * Special case for the Lenovo Yoga Tab 3 Pro YT3-X90 where the DSDT
++	 * misses the codec. Match on the SST id instead; lenovo_yt3_x90_quirk()
++	 * will return a YT3-specific mach, or NULL when called on other hw,
++ * skipping this entry.
++ */
++ {
++ .id = "808622A8",
++ .machine_quirk = lenovo_yt3_x90_quirk,
++ },
+
+ #if IS_ENABLED(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH)
+ /*
+diff --git a/sound/soc/mediatek/mt8188/mt8188-mt6359.c b/sound/soc/mediatek/mt8188/mt8188-mt6359.c
+index ac69c23e0da1c..7048ff52ab86a 100644
+--- a/sound/soc/mediatek/mt8188/mt8188-mt6359.c
++++ b/sound/soc/mediatek/mt8188/mt8188-mt6359.c
+@@ -246,6 +246,11 @@ static const struct snd_soc_dapm_widget mt8188_mt6359_widgets[] = {
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_SINK("HDMI"),
+ SND_SOC_DAPM_SINK("DP"),
++
++ /* dynamic pinctrl */
++ SND_SOC_DAPM_PINCTRL("ETDM_SPK_PIN", "aud_etdm_spk_on", "aud_etdm_spk_off"),
++ SND_SOC_DAPM_PINCTRL("ETDM_HP_PIN", "aud_etdm_hp_on", "aud_etdm_hp_off"),
++ SND_SOC_DAPM_PINCTRL("MTKAIF_PIN", "aud_mtkaif_on", "aud_mtkaif_off"),
+ };
+
+ static const struct snd_kcontrol_new mt8188_mt6359_controls[] = {
+@@ -267,6 +272,7 @@ static int mt8188_mt6359_mtkaif_calibration(struct snd_soc_pcm_runtime *rtd)
+ snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME);
+ struct snd_soc_component *cmpnt_codec =
+ asoc_rtd_to_codec(rtd, 0)->component;
++ struct snd_soc_dapm_widget *pin_w = NULL, *w;
+ struct mtk_base_afe *afe;
+ struct mt8188_afe_private *afe_priv;
+ struct mtkaif_param *param;
+@@ -306,6 +312,18 @@ static int mt8188_mt6359_mtkaif_calibration(struct snd_soc_pcm_runtime *rtd)
+ return 0;
+ }
+
++ for_each_card_widgets(rtd->card, w) {
++ if (!strcmp(w->name, "MTKAIF_PIN")) {
++ pin_w = w;
++ break;
++ }
++ }
++
++ if (pin_w)
++ dapm_pinctrl_event(pin_w, NULL, SND_SOC_DAPM_PRE_PMU);
++ else
++		dev_dbg(afe->dev, "%s(), no pinmux widget, please check if the default state is on\n", __func__);
++
+ pm_runtime_get_sync(afe->dev);
+ mt6359_mtkaif_calibration_enable(cmpnt_codec);
+
+@@ -403,6 +421,9 @@ static int mt8188_mt6359_mtkaif_calibration(struct snd_soc_pcm_runtime *rtd)
+ for (i = 0; i < MT8188_MTKAIF_MISO_NUM; i++)
+ param->mtkaif_phase_cycle[i] = mtkaif_phase_cycle[i];
+
++ if (pin_w)
++ dapm_pinctrl_event(pin_w, NULL, SND_SOC_DAPM_POST_PMD);
++
+ dev_dbg(afe->dev, "%s(), end, calibration ok %d\n",
+ __func__, param->mtkaif_calibration_ok);
+
+diff --git a/sound/soc/soc-dai.c b/sound/soc/soc-dai.c
+index 02dd64dea1792..28d8c6c3d3b26 100644
+--- a/sound/soc/soc-dai.c
++++ b/sound/soc/soc-dai.c
+@@ -641,6 +641,10 @@ int snd_soc_pcm_dai_trigger(struct snd_pcm_substream *substream,
+ ret = soc_dai_trigger(dai, substream, cmd);
+ if (ret < 0)
+ break;
++
++ if (dai->driver->ops && dai->driver->ops->mute_unmute_on_trigger)
++ snd_soc_dai_digital_mute(dai, 0, substream->stream);
++
+ soc_dai_mark_push(dai, substream, trigger);
+ }
+ break;
+@@ -651,6 +655,9 @@ int snd_soc_pcm_dai_trigger(struct snd_pcm_substream *substream,
+ if (rollback && !soc_dai_mark_match(dai, substream, trigger))
+ continue;
+
++ if (dai->driver->ops && dai->driver->ops->mute_unmute_on_trigger)
++ snd_soc_dai_digital_mute(dai, 1, substream->stream);
++
+ r = soc_dai_trigger(dai, substream, cmd);
+ if (r < 0)
+ ret = r; /* use last ret */
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index 6cf4cd667d036..4cb710a4cea42 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -896,8 +896,10 @@ static int __soc_pcm_prepare(struct snd_soc_pcm_runtime *rtd,
+ snd_soc_dapm_stream_event(rtd, substream->stream,
+ SND_SOC_DAPM_STREAM_START);
+
+- for_each_rtd_dais(rtd, i, dai)
+- snd_soc_dai_digital_mute(dai, 0, substream->stream);
++ for_each_rtd_dais(rtd, i, dai) {
++ if (dai->driver->ops && !dai->driver->ops->mute_unmute_on_trigger)
++ snd_soc_dai_digital_mute(dai, 0, substream->stream);
++ }
+
+ out:
+ return soc_pcm_ret(rtd, ret);
+@@ -939,8 +941,10 @@ static int soc_pcm_hw_clean(struct snd_soc_pcm_runtime *rtd,
+ if (snd_soc_dai_active(dai) == 1)
+ soc_pcm_set_dai_params(dai, NULL);
+
+- if (snd_soc_dai_stream_active(dai, substream->stream) == 1)
+- snd_soc_dai_digital_mute(dai, 1, substream->stream);
++ if (snd_soc_dai_stream_active(dai, substream->stream) == 1) {
++ if (dai->driver->ops && !dai->driver->ops->mute_unmute_on_trigger)
++ snd_soc_dai_digital_mute(dai, 1, substream->stream);
++ }
+ }
+
+ /* run the stream event */
+diff --git a/sound/soc/sof/ipc4.c b/sound/soc/sof/ipc4.c
+index ab6eddd91bb77..1b09496733fb8 100644
+--- a/sound/soc/sof/ipc4.c
++++ b/sound/soc/sof/ipc4.c
+@@ -614,6 +614,9 @@ static void sof_ipc4_rx_msg(struct snd_sof_dev *sdev)
+ case SOF_IPC4_NOTIFY_LOG_BUFFER_STATUS:
+ sof_ipc4_mtrace_update_pos(sdev, SOF_IPC4_LOG_CORE_GET(ipc4_msg->primary));
+ break;
++ case SOF_IPC4_NOTIFY_EXCEPTION_CAUGHT:
++ snd_sof_dsp_panic(sdev, 0, true);
++ break;
+ default:
+ dev_dbg(sdev->dev, "Unhandled DSP message: %#x|%#x\n",
+ ipc4_msg->primary, ipc4_msg->extension);
+diff --git a/sound/soc/sof/sof-audio.c b/sound/soc/sof/sof-audio.c
+index e5405f854a910..563fe6f7789f7 100644
+--- a/sound/soc/sof/sof-audio.c
++++ b/sound/soc/sof/sof-audio.c
+@@ -1032,6 +1032,13 @@ int sof_machine_check(struct snd_sof_dev *sdev)
+ mach = snd_sof_machine_select(sdev);
+ if (mach) {
+ sof_pdata->machine = mach;
++
++ if (sof_pdata->subsystem_id_set) {
++ mach->mach_params.subsystem_vendor = sof_pdata->subsystem_vendor;
++ mach->mach_params.subsystem_device = sof_pdata->subsystem_device;
++ mach->mach_params.subsystem_id_set = true;
++ }
++
+ snd_sof_set_mach_params(mach, sdev);
+ return 0;
+ }
+diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
+index f42c85df88a80..69a2352f2e1a0 100644
+--- a/sound/soc/sof/sof-pci-dev.c
++++ b/sound/soc/sof/sof-pci-dev.c
+@@ -221,6 +221,14 @@ int sof_pci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+ return ret;
+
+ sof_pdata->name = pci_name(pci);
++
++ /* PCI defines a vendor ID of 0xFFFF as invalid. */
++ if (pci->subsystem_vendor != 0xFFFF) {
++ sof_pdata->subsystem_vendor = pci->subsystem_vendor;
++ sof_pdata->subsystem_device = pci->subsystem_device;
++ sof_pdata->subsystem_id_set = true;
++ }
++
+ sof_pdata->desc = desc;
+ sof_pdata->dev = dev;
+
+diff --git a/sound/soc/ti/omap-mcbsp.c b/sound/soc/ti/omap-mcbsp.c
+index 21fa7b9787997..94c514e57eef9 100644
+--- a/sound/soc/ti/omap-mcbsp.c
++++ b/sound/soc/ti/omap-mcbsp.c
+@@ -74,14 +74,16 @@ static int omap2_mcbsp_set_clks_src(struct omap_mcbsp *mcbsp, u8 fck_src_id)
+ return -EINVAL;
+ }
+
+- pm_runtime_put_sync(mcbsp->dev);
++ if (mcbsp->active)
++ pm_runtime_put_sync(mcbsp->dev);
+
+ r = clk_set_parent(mcbsp->fclk, fck_src);
+ if (r)
+ dev_err(mcbsp->dev, "CLKS: could not clk_set_parent() to %s\n",
+ src);
+
+- pm_runtime_get_sync(mcbsp->dev);
++ if (mcbsp->active)
++ pm_runtime_get_sync(mcbsp->dev);
+
+ clk_put(fck_src);
+
+diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h
+index 3c36aeade991e..9a85c69782bdd 100644
+--- a/tools/include/uapi/linux/prctl.h
++++ b/tools/include/uapi/linux/prctl.h
+@@ -283,7 +283,7 @@ struct prctl_mm_map {
+
+ /* Memory deny write / execute */
+ #define PR_SET_MDWE 65
+-# define PR_MDWE_REFUSE_EXEC_GAIN 1
++# define PR_MDWE_REFUSE_EXEC_GAIN (1UL << 0)
+
+ #define PR_GET_MDWE 66
+
+diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
+index dbf0bc71a63be..f38893e0b0369 100644
+--- a/tools/perf/util/intel-pt.c
++++ b/tools/perf/util/intel-pt.c
+@@ -1512,9 +1512,11 @@ static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
+ } else if (ptq->state->flags & INTEL_PT_ASYNC) {
+ if (!ptq->state->to_ip)
+ ptq->flags = PERF_IP_FLAG_BRANCH |
++ PERF_IP_FLAG_ASYNC |
+ PERF_IP_FLAG_TRACE_END;
+ else if (ptq->state->from_nr && !ptq->state->to_nr)
+ ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
++ PERF_IP_FLAG_ASYNC |
+ PERF_IP_FLAG_VMEXIT;
+ else
+ ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
+diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
+index 8a36ba5df9f90..ce9860e388bd4 100644
+--- a/tools/power/x86/turbostat/turbostat.c
++++ b/tools/power/x86/turbostat/turbostat.c
+@@ -2180,7 +2180,7 @@ retry:
+ if ((DO_BIC(BIC_CPU_c6) || soft_c1_residency_display(BIC_CPU_c6)) && !do_knl_cstates) {
+ if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
+ return -7;
+- } else if (do_knl_cstates || soft_c1_residency_display(BIC_CPU_c6)) {
++ } else if (do_knl_cstates && soft_c1_residency_display(BIC_CPU_c6)) {
+ if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6))
+ return -7;
+ }
+@@ -5790,6 +5790,7 @@ void process_cpuid()
+ rapl_probe(family, model);
+ perf_limit_reasons_probe(family, model);
+ automatic_cstate_conversion_probe(family, model);
++ prewake_cstate_probe(family, model);
+
+ check_tcc_offset(model_orig);
+
+diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c
+index fb6ab9cef84f7..b885462999022 100644
+--- a/tools/testing/cxl/test/cxl.c
++++ b/tools/testing/cxl/test/cxl.c
+@@ -831,7 +831,7 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
+ cxld->interleave_ways = 2;
+ else
+ cxld->interleave_ways = 1;
+- cxld->interleave_granularity = 256;
++ cxld->interleave_granularity = 4096;
+ cxld->hpa_range = (struct range) {
+ .start = base,
+ .end = base + size - 1,
+diff --git a/tools/testing/selftests/bpf/verifier/ld_imm64.c b/tools/testing/selftests/bpf/verifier/ld_imm64.c
+index f9297900cea6d..78f19c255f20b 100644
+--- a/tools/testing/selftests/bpf/verifier/ld_imm64.c
++++ b/tools/testing/selftests/bpf/verifier/ld_imm64.c
+@@ -9,8 +9,8 @@
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+- .errstr = "invalid BPF_LD_IMM insn",
+- .errstr_unpriv = "R1 pointer comparison",
++ .errstr = "jump into the middle of ldimm64 insn 1",
++ .errstr_unpriv = "jump into the middle of ldimm64 insn 1",
+ .result = REJECT,
+ },
+ {
+@@ -23,8 +23,8 @@
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+- .errstr = "invalid BPF_LD_IMM insn",
+- .errstr_unpriv = "R1 pointer comparison",
++ .errstr = "jump into the middle of ldimm64 insn 1",
++ .errstr_unpriv = "jump into the middle of ldimm64 insn 1",
+ .result = REJECT,
+ },
+ {
+diff --git a/tools/testing/selftests/clone3/clone3.c b/tools/testing/selftests/clone3/clone3.c
+index e60cf4da8fb07..1c61e3c022cb8 100644
+--- a/tools/testing/selftests/clone3/clone3.c
++++ b/tools/testing/selftests/clone3/clone3.c
+@@ -196,7 +196,12 @@ int main(int argc, char *argv[])
+ CLONE3_ARGS_NO_TEST);
+
+ /* Do a clone3() in a new time namespace */
+- test_clone3(CLONE_NEWTIME, 0, 0, CLONE3_ARGS_NO_TEST);
++ if (access("/proc/self/ns/time", F_OK) == 0) {
++ test_clone3(CLONE_NEWTIME, 0, 0, CLONE3_ARGS_NO_TEST);
++ } else {
++ ksft_print_msg("Time namespaces are not supported\n");
++ ksft_test_result_skip("Skipping clone3() with CLONE_NEWTIME\n");
++ }
+
+ /* Do a clone3() with exit signal (SIGCHLD) in flags */
+ test_clone3(SIGCHLD, 0, -EINVAL, CLONE3_ARGS_NO_TEST);
+diff --git a/tools/testing/selftests/efivarfs/create-read.c b/tools/testing/selftests/efivarfs/create-read.c
+index 9674a19396a32..7bc7af4eb2c17 100644
+--- a/tools/testing/selftests/efivarfs/create-read.c
++++ b/tools/testing/selftests/efivarfs/create-read.c
+@@ -32,8 +32,10 @@ int main(int argc, char **argv)
+ rc = read(fd, buf, sizeof(buf));
+ if (rc != 0) {
+ fprintf(stderr, "Reading a new var should return EOF\n");
++ close(fd);
+ return EXIT_FAILURE;
+ }
+
++ close(fd);
+ return EXIT_SUCCESS;
+ }
+diff --git a/tools/testing/selftests/lkdtm/config b/tools/testing/selftests/lkdtm/config
+index 5d52f64dfb430..7afe05e8c4d79 100644
+--- a/tools/testing/selftests/lkdtm/config
++++ b/tools/testing/selftests/lkdtm/config
+@@ -9,7 +9,6 @@ CONFIG_INIT_ON_FREE_DEFAULT_ON=y
+ CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
+ CONFIG_UBSAN=y
+ CONFIG_UBSAN_BOUNDS=y
+-CONFIG_UBSAN_TRAP=y
+ CONFIG_STACKPROTECTOR_STRONG=y
+ CONFIG_SLUB_DEBUG=y
+ CONFIG_SLUB_DEBUG_ON=y
+diff --git a/tools/testing/selftests/lkdtm/tests.txt b/tools/testing/selftests/lkdtm/tests.txt
+index 607b8d7e3ea34..2f3a1b96da6e3 100644
+--- a/tools/testing/selftests/lkdtm/tests.txt
++++ b/tools/testing/selftests/lkdtm/tests.txt
+@@ -7,7 +7,7 @@ EXCEPTION
+ #EXHAUST_STACK Corrupts memory on failure
+ #CORRUPT_STACK Crashes entire system on success
+ #CORRUPT_STACK_STRONG Crashes entire system on success
+-ARRAY_BOUNDS
++ARRAY_BOUNDS call trace:|UBSAN: array-index-out-of-bounds
+ CORRUPT_LIST_ADD list_add corruption
+ CORRUPT_LIST_DEL list_del corruption
+ STACK_GUARD_PAGE_LEADING
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index 5a02fef4b070c..78003187524d4 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -3207,7 +3207,7 @@ fastclose_tests()
+ if reset_check_counter "fastclose server test" "MPTcpExtMPFastcloseRx"; then
+ test_linkfail=1024 addr_nr_ns2=fastclose_server \
+ run_tests $ns1 $ns2 10.0.1.1
+- chk_join_nr 0 0 0
++ chk_join_nr 0 0 0 0 0 0 1
+ chk_fclose_nr 1 1 invert
+ chk_rst_nr 1 1
+ fi
+diff --git a/tools/testing/selftests/resctrl/Makefile b/tools/testing/selftests/resctrl/Makefile
+index 5073dbc961258..2deac2031de9e 100644
+--- a/tools/testing/selftests/resctrl/Makefile
++++ b/tools/testing/selftests/resctrl/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+
+-CFLAGS = -g -Wall -O2 -D_FORTIFY_SOURCE=2
++CFLAGS = -g -Wall -O2 -D_FORTIFY_SOURCE=2 -D_GNU_SOURCE
+ CFLAGS += $(KHDR_INCLUDES)
+
+ TEST_GEN_PROGS := resctrl_tests
+diff --git a/tools/testing/selftests/resctrl/cmt_test.c b/tools/testing/selftests/resctrl/cmt_test.c
+index af71b21412710..37f50252dead1 100644
+--- a/tools/testing/selftests/resctrl/cmt_test.c
++++ b/tools/testing/selftests/resctrl/cmt_test.c
+@@ -90,9 +90,6 @@ int cmt_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
+ if (ret)
+ return ret;
+
+- if (!validate_resctrl_feature_request(CMT_STR))
+- return -1;
+-
+ ret = get_cbm_mask("L3", cbm_mask);
+ if (ret)
+ return ret;
+diff --git a/tools/testing/selftests/resctrl/mba_test.c b/tools/testing/selftests/resctrl/mba_test.c
+index cde3781a9ab05..bdb36509beb74 100644
+--- a/tools/testing/selftests/resctrl/mba_test.c
++++ b/tools/testing/selftests/resctrl/mba_test.c
+@@ -12,7 +12,7 @@
+
+ #define RESULT_FILE_NAME "result_mba"
+ #define NUM_OF_RUNS 5
+-#define MAX_DIFF_PERCENT 5
++#define MAX_DIFF_PERCENT 8
+ #define ALLOCATION_MAX 100
+ #define ALLOCATION_MIN 10
+ #define ALLOCATION_STEP 10
+diff --git a/tools/testing/selftests/resctrl/mbm_test.c b/tools/testing/selftests/resctrl/mbm_test.c
+index 538d35a6485ac..299eaed3edf1d 100644
+--- a/tools/testing/selftests/resctrl/mbm_test.c
++++ b/tools/testing/selftests/resctrl/mbm_test.c
+@@ -11,7 +11,7 @@
+ #include "resctrl.h"
+
+ #define RESULT_FILE_NAME "result_mbm"
+-#define MAX_DIFF_PERCENT 5
++#define MAX_DIFF_PERCENT 8
+ #define NUM_OF_RUNS 5
+
+ static int
+diff --git a/tools/testing/selftests/resctrl/resctrl.h b/tools/testing/selftests/resctrl/resctrl.h
+index f455f0b7e314b..d8029cb474c9f 100644
+--- a/tools/testing/selftests/resctrl/resctrl.h
++++ b/tools/testing/selftests/resctrl/resctrl.h
+@@ -1,5 +1,4 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+-#define _GNU_SOURCE
+ #ifndef RESCTRL_H
+ #define RESCTRL_H
+ #include <stdio.h>
+diff --git a/tools/testing/selftests/resctrl/resctrl_val.c b/tools/testing/selftests/resctrl/resctrl_val.c
+index ab1eab1e7ff63..f27e5c2a5171f 100644
+--- a/tools/testing/selftests/resctrl/resctrl_val.c
++++ b/tools/testing/selftests/resctrl/resctrl_val.c
+@@ -482,7 +482,7 @@ void ctrlc_handler(int signum, siginfo_t *info, void *ptr)
+ */
+ int signal_handler_register(void)
+ {
+- struct sigaction sigact;
++ struct sigaction sigact = {};
+ int ret = 0;
+
+ sigact.sa_sigaction = ctrlc_handler;
+@@ -504,7 +504,7 @@ int signal_handler_register(void)
+ */
+ void signal_handler_unregister(void)
+ {
+- struct sigaction sigact;
++ struct sigaction sigact = {};
+
+ sigact.sa_handler = SIG_DFL;
+ sigemptyset(&sigact.sa_mask);