author	Mike Pagano <mpagano@gentoo.org>	2020-02-14 18:56:32 -0500
committer	Mike Pagano <mpagano@gentoo.org>	2020-02-14 18:56:32 -0500
commit	fc793d41bdad23c353174f56a79f97eb170ea76e (patch)
tree	2d0bfdd4e4068afe7155da5aeac8f9f1e10b28ff
parent	Linux patch 5.5.3 (diff)
download	linux-patches-fc793d41bdad23c353174f56a79f97eb170ea76e.tar.gz
linux-patches-fc793d41bdad23c353174f56a79f97eb170ea76e.tar.bz2
linux-patches-fc793d41bdad23c353174f56a79f97eb170ea76e.zip
Linux patch 5.5.4 (5.5-6)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--	0000_README	4
-rw-r--r--	1003_linux-5.5.4.patch	4385
2 files changed, 4389 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index d7385d1a..567c784c 100644
--- a/0000_README
+++ b/0000_README
@@ -55,6 +55,10 @@ Patch: 1002_linux-5.5.3.patch
From: http://www.kernel.org
Desc: Linux 5.5.3
+Patch: 1003_linux-5.5.4.patch
+From: http://www.kernel.org
+Desc: Linux 5.5.4
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1003_linux-5.5.4.patch b/1003_linux-5.5.4.patch
new file mode 100644
index 00000000..1390447e
--- /dev/null
+++ b/1003_linux-5.5.4.patch
@@ -0,0 +1,4385 @@
+diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
+index 6eb33207a167..5117ad68a584 100644
+--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
++++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
+@@ -82,7 +82,7 @@ properties:
+ Must be the device tree identifier of the over-sampling
+ mode pins. As the line is active high, it should be marked
+ GPIO_ACTIVE_HIGH.
+- maxItems: 1
++ maxItems: 3
+
+ adi,sw-mode:
+ description:
+@@ -125,9 +125,9 @@ examples:
+ adi,conversion-start-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio 27 GPIO_ACTIVE_HIGH>;
+ adi,first-data-gpios = <&gpio 22 GPIO_ACTIVE_HIGH>;
+- adi,oversampling-ratio-gpios = <&gpio 18 GPIO_ACTIVE_HIGH
+- &gpio 23 GPIO_ACTIVE_HIGH
+- &gpio 26 GPIO_ACTIVE_HIGH>;
++ adi,oversampling-ratio-gpios = <&gpio 18 GPIO_ACTIVE_HIGH>,
++ <&gpio 23 GPIO_ACTIVE_HIGH>,
++ <&gpio 26 GPIO_ACTIVE_HIGH>;
+ standby-gpios = <&gpio 24 GPIO_ACTIVE_LOW>;
+ adi,sw-mode;
+ };
+diff --git a/Makefile b/Makefile
+index fdaa1e262320..62f956e9c81d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 5
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
+index f9a5c9ddcae7..1d109b06e7d8 100644
+--- a/arch/arc/boot/dts/axs10x_mb.dtsi
++++ b/arch/arc/boot/dts/axs10x_mb.dtsi
+@@ -78,6 +78,7 @@
+ interrupt-names = "macirq";
+ phy-mode = "rgmii";
+ snps,pbl = < 32 >;
++ snps,multicast-filter-bins = <256>;
+ clocks = <&apbclk>;
+ clock-names = "stmmaceth";
+ max-speed = <100>;
+diff --git a/arch/arm/boot/dts/am43xx-clocks.dtsi b/arch/arm/boot/dts/am43xx-clocks.dtsi
+index 091356f2a8c1..c726cd8dbdf1 100644
+--- a/arch/arm/boot/dts/am43xx-clocks.dtsi
++++ b/arch/arm/boot/dts/am43xx-clocks.dtsi
+@@ -704,6 +704,60 @@
+ ti,bit-shift = <8>;
+ reg = <0x2a48>;
+ };
++
++ clkout1_osc_div_ck: clkout1-osc-div-ck {
++ #clock-cells = <0>;
++ compatible = "ti,divider-clock";
++ clocks = <&sys_clkin_ck>;
++ ti,bit-shift = <20>;
++ ti,max-div = <4>;
++ reg = <0x4100>;
++ };
++
++ clkout1_src2_mux_ck: clkout1-src2-mux-ck {
++ #clock-cells = <0>;
++ compatible = "ti,mux-clock";
++ clocks = <&clk_rc32k_ck>, <&sysclk_div>, <&dpll_ddr_m2_ck>,
++ <&dpll_per_m2_ck>, <&dpll_disp_m2_ck>,
++ <&dpll_mpu_m2_ck>;
++ reg = <0x4100>;
++ };
++
++ clkout1_src2_pre_div_ck: clkout1-src2-pre-div-ck {
++ #clock-cells = <0>;
++ compatible = "ti,divider-clock";
++ clocks = <&clkout1_src2_mux_ck>;
++ ti,bit-shift = <4>;
++ ti,max-div = <8>;
++ reg = <0x4100>;
++ };
++
++ clkout1_src2_post_div_ck: clkout1-src2-post-div-ck {
++ #clock-cells = <0>;
++ compatible = "ti,divider-clock";
++ clocks = <&clkout1_src2_pre_div_ck>;
++ ti,bit-shift = <8>;
++ ti,max-div = <32>;
++ ti,index-power-of-two;
++ reg = <0x4100>;
++ };
++
++ clkout1_mux_ck: clkout1-mux-ck {
++ #clock-cells = <0>;
++ compatible = "ti,mux-clock";
++ clocks = <&clkout1_osc_div_ck>, <&clk_rc32k_ck>,
++ <&clkout1_src2_post_div_ck>, <&dpll_extdev_m2_ck>;
++ ti,bit-shift = <16>;
++ reg = <0x4100>;
++ };
++
++ clkout1_ck: clkout1-ck {
++ #clock-cells = <0>;
++ compatible = "ti,gate-clock";
++ clocks = <&clkout1_mux_ck>;
++ ti,bit-shift = <23>;
++ reg = <0x4100>;
++ };
+ };
+
+ &prcm {
+diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
+index dee9c0c8a096..16c6fd3c4246 100644
+--- a/arch/arm/boot/dts/at91sam9260.dtsi
++++ b/arch/arm/boot/dts/at91sam9260.dtsi
+@@ -187,7 +187,7 @@
+ usart0 {
+ pinctrl_usart0: usart0-0 {
+ atmel,pins =
+- <AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_NONE
++ <AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
+ AT91_PIOB 5 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
+ };
+
+@@ -221,7 +221,7 @@
+ usart1 {
+ pinctrl_usart1: usart1-0 {
+ atmel,pins =
+- <AT91_PIOB 6 AT91_PERIPH_A AT91_PINCTRL_NONE
++ <AT91_PIOB 6 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
+ AT91_PIOB 7 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
+ };
+
+@@ -239,7 +239,7 @@
+ usart2 {
+ pinctrl_usart2: usart2-0 {
+ atmel,pins =
+- <AT91_PIOB 8 AT91_PERIPH_A AT91_PINCTRL_NONE
++ <AT91_PIOB 8 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
+ AT91_PIOB 9 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
+ };
+
+@@ -257,7 +257,7 @@
+ usart3 {
+ pinctrl_usart3: usart3-0 {
+ atmel,pins =
+- <AT91_PIOB 10 AT91_PERIPH_A AT91_PINCTRL_NONE
++ <AT91_PIOB 10 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
+ AT91_PIOB 11 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
+ };
+
+@@ -275,7 +275,7 @@
+ uart0 {
+ pinctrl_uart0: uart0-0 {
+ atmel,pins =
+- <AT91_PIOA 31 AT91_PERIPH_B AT91_PINCTRL_NONE
++ <AT91_PIOA 31 AT91_PERIPH_B AT91_PINCTRL_PULL_UP
+ AT91_PIOA 30 AT91_PERIPH_B AT91_PINCTRL_PULL_UP>;
+ };
+ };
+@@ -283,7 +283,7 @@
+ uart1 {
+ pinctrl_uart1: uart1-0 {
+ atmel,pins =
+- <AT91_PIOB 12 AT91_PERIPH_A AT91_PINCTRL_NONE
++ <AT91_PIOB 12 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
+ AT91_PIOB 13 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
+ };
+ };
+diff --git a/arch/arm/boot/dts/at91sam9261.dtsi b/arch/arm/boot/dts/at91sam9261.dtsi
+index dba025a98527..5ed3d745ac86 100644
+--- a/arch/arm/boot/dts/at91sam9261.dtsi
++++ b/arch/arm/boot/dts/at91sam9261.dtsi
+@@ -329,7 +329,7 @@
+ usart0 {
+ pinctrl_usart0: usart0-0 {
+ atmel,pins =
+- <AT91_PIOC 8 AT91_PERIPH_A AT91_PINCTRL_NONE>,
++ <AT91_PIOC 8 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
+ <AT91_PIOC 9 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
+ };
+
+@@ -347,7 +347,7 @@
+ usart1 {
+ pinctrl_usart1: usart1-0 {
+ atmel,pins =
+- <AT91_PIOC 12 AT91_PERIPH_A AT91_PINCTRL_NONE>,
++ <AT91_PIOC 12 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
+ <AT91_PIOC 13 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
+ };
+
+@@ -365,7 +365,7 @@
+ usart2 {
+ pinctrl_usart2: usart2-0 {
+ atmel,pins =
+- <AT91_PIOC 14 AT91_PERIPH_A AT91_PINCTRL_NONE>,
++ <AT91_PIOC 14 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
+ <AT91_PIOC 15 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
+ };
+
+diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
+index 99678abdda93..5c990cfae254 100644
+--- a/arch/arm/boot/dts/at91sam9263.dtsi
++++ b/arch/arm/boot/dts/at91sam9263.dtsi
+@@ -183,7 +183,7 @@
+ usart0 {
+ pinctrl_usart0: usart0-0 {
+ atmel,pins =
+- <AT91_PIOA 26 AT91_PERIPH_A AT91_PINCTRL_NONE
++ <AT91_PIOA 26 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
+ AT91_PIOA 27 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
+ };
+
+@@ -201,7 +201,7 @@
+ usart1 {
+ pinctrl_usart1: usart1-0 {
+ atmel,pins =
+- <AT91_PIOD 0 AT91_PERIPH_A AT91_PINCTRL_NONE
++ <AT91_PIOD 0 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
+ AT91_PIOD 1 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
+ };
+
+@@ -219,7 +219,7 @@
+ usart2 {
+ pinctrl_usart2: usart2-0 {
+ atmel,pins =
+- <AT91_PIOD 2 AT91_PERIPH_A AT91_PINCTRL_NONE
++ <AT91_PIOD 2 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
+ AT91_PIOD 3 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
+ };
+
+diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
+index 691c95ea6175..fd179097a4bf 100644
+--- a/arch/arm/boot/dts/at91sam9g45.dtsi
++++ b/arch/arm/boot/dts/at91sam9g45.dtsi
+@@ -556,7 +556,7 @@
+ usart0 {
+ pinctrl_usart0: usart0-0 {
+ atmel,pins =
+- <AT91_PIOB 19 AT91_PERIPH_A AT91_PINCTRL_NONE
++ <AT91_PIOB 19 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
+ AT91_PIOB 18 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
+ };
+
+@@ -574,7 +574,7 @@
+ usart1 {
+ pinctrl_usart1: usart1-0 {
+ atmel,pins =
+- <AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_NONE
++ <AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
+ AT91_PIOB 5 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
+ };
+
+@@ -592,7 +592,7 @@
+ usart2 {
+ pinctrl_usart2: usart2-0 {
+ atmel,pins =
+- <AT91_PIOB 6 AT91_PERIPH_A AT91_PINCTRL_NONE
++ <AT91_PIOB 6 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
+ AT91_PIOB 7 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
+ };
+
+@@ -610,7 +610,7 @@
+ usart3 {
+ pinctrl_usart3: usart3-0 {
+ atmel,pins =
+- <AT91_PIOB 8 AT91_PERIPH_A AT91_PINCTRL_NONE
++ <AT91_PIOB 8 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
+ AT91_PIOB 9 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
+ };
+
+diff --git a/arch/arm/boot/dts/at91sam9rl.dtsi b/arch/arm/boot/dts/at91sam9rl.dtsi
+index 8643b7151565..ea024e4b6e09 100644
+--- a/arch/arm/boot/dts/at91sam9rl.dtsi
++++ b/arch/arm/boot/dts/at91sam9rl.dtsi
+@@ -682,7 +682,7 @@
+ usart0 {
+ pinctrl_usart0: usart0-0 {
+ atmel,pins =
+- <AT91_PIOA 6 AT91_PERIPH_A AT91_PINCTRL_NONE>,
++ <AT91_PIOA 6 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
+ <AT91_PIOA 7 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
+ };
+
+@@ -721,7 +721,7 @@
+ usart1 {
+ pinctrl_usart1: usart1-0 {
+ atmel,pins =
+- <AT91_PIOA 11 AT91_PERIPH_A AT91_PINCTRL_NONE>,
++ <AT91_PIOA 11 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
+ <AT91_PIOA 12 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
+ };
+
+@@ -744,7 +744,7 @@
+ usart2 {
+ pinctrl_usart2: usart2-0 {
+ atmel,pins =
+- <AT91_PIOA 13 AT91_PERIPH_A AT91_PINCTRL_NONE>,
++ <AT91_PIOA 13 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
+ <AT91_PIOA 14 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
+ };
+
+@@ -767,7 +767,7 @@
+ usart3 {
+ pinctrl_usart3: usart3-0 {
+ atmel,pins =
+- <AT91_PIOB 0 AT91_PERIPH_A AT91_PINCTRL_NONE>,
++ <AT91_PIOB 0 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
+ <AT91_PIOB 1 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
+ };
+
+diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi
+index 3c534cd50ee3..db2033f674c6 100644
+--- a/arch/arm/boot/dts/meson8.dtsi
++++ b/arch/arm/boot/dts/meson8.dtsi
+@@ -129,8 +129,8 @@
+ gpu_opp_table: gpu-opp-table {
+ compatible = "operating-points-v2";
+
+- opp-182150000 {
+- opp-hz = /bits/ 64 <182150000>;
++ opp-182142857 {
++ opp-hz = /bits/ 64 <182142857>;
+ opp-microvolt = <1150000>;
+ };
+ opp-318750000 {
+diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi
+index 099bf8e711c9..1e8c5d7bc824 100644
+--- a/arch/arm/boot/dts/meson8b.dtsi
++++ b/arch/arm/boot/dts/meson8b.dtsi
+@@ -125,8 +125,8 @@
+ opp-hz = /bits/ 64 <255000000>;
+ opp-microvolt = <1100000>;
+ };
+- opp-364300000 {
+- opp-hz = /bits/ 64 <364300000>;
++ opp-364285714 {
++ opp-hz = /bits/ 64 <364285714>;
+ opp-microvolt = <1100000>;
+ };
+ opp-425000000 {
+diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
+index f770aace0efd..203d40be70a5 100644
+--- a/arch/arm/boot/dts/sama5d3.dtsi
++++ b/arch/arm/boot/dts/sama5d3.dtsi
+@@ -1188,49 +1188,49 @@
+ usart0_clk: usart0_clk {
+ #clock-cells = <0>;
+ reg = <12>;
+- atmel,clk-output-range = <0 66000000>;
++ atmel,clk-output-range = <0 83000000>;
+ };
+
+ usart1_clk: usart1_clk {
+ #clock-cells = <0>;
+ reg = <13>;
+- atmel,clk-output-range = <0 66000000>;
++ atmel,clk-output-range = <0 83000000>;
+ };
+
+ usart2_clk: usart2_clk {
+ #clock-cells = <0>;
+ reg = <14>;
+- atmel,clk-output-range = <0 66000000>;
++ atmel,clk-output-range = <0 83000000>;
+ };
+
+ usart3_clk: usart3_clk {
+ #clock-cells = <0>;
+ reg = <15>;
+- atmel,clk-output-range = <0 66000000>;
++ atmel,clk-output-range = <0 83000000>;
+ };
+
+ uart0_clk: uart0_clk {
+ #clock-cells = <0>;
+ reg = <16>;
+- atmel,clk-output-range = <0 66000000>;
++ atmel,clk-output-range = <0 83000000>;
+ };
+
+ twi0_clk: twi0_clk {
+ reg = <18>;
+ #clock-cells = <0>;
+- atmel,clk-output-range = <0 16625000>;
++ atmel,clk-output-range = <0 41500000>;
+ };
+
+ twi1_clk: twi1_clk {
+ #clock-cells = <0>;
+ reg = <19>;
+- atmel,clk-output-range = <0 16625000>;
++ atmel,clk-output-range = <0 41500000>;
+ };
+
+ twi2_clk: twi2_clk {
+ #clock-cells = <0>;
+ reg = <20>;
+- atmel,clk-output-range = <0 16625000>;
++ atmel,clk-output-range = <0 41500000>;
+ };
+
+ mci0_clk: mci0_clk {
+@@ -1246,19 +1246,19 @@
+ spi0_clk: spi0_clk {
+ #clock-cells = <0>;
+ reg = <24>;
+- atmel,clk-output-range = <0 133000000>;
++ atmel,clk-output-range = <0 166000000>;
+ };
+
+ spi1_clk: spi1_clk {
+ #clock-cells = <0>;
+ reg = <25>;
+- atmel,clk-output-range = <0 133000000>;
++ atmel,clk-output-range = <0 166000000>;
+ };
+
+ tcb0_clk: tcb0_clk {
+ #clock-cells = <0>;
+ reg = <26>;
+- atmel,clk-output-range = <0 133000000>;
++ atmel,clk-output-range = <0 166000000>;
+ };
+
+ pwm_clk: pwm_clk {
+@@ -1269,7 +1269,7 @@
+ adc_clk: adc_clk {
+ #clock-cells = <0>;
+ reg = <29>;
+- atmel,clk-output-range = <0 66000000>;
++ atmel,clk-output-range = <0 83000000>;
+ };
+
+ dma0_clk: dma0_clk {
+@@ -1300,13 +1300,13 @@
+ ssc0_clk: ssc0_clk {
+ #clock-cells = <0>;
+ reg = <38>;
+- atmel,clk-output-range = <0 66000000>;
++ atmel,clk-output-range = <0 83000000>;
+ };
+
+ ssc1_clk: ssc1_clk {
+ #clock-cells = <0>;
+ reg = <39>;
+- atmel,clk-output-range = <0 66000000>;
++ atmel,clk-output-range = <0 83000000>;
+ };
+
+ sha_clk: sha_clk {
+diff --git a/arch/arm/boot/dts/sama5d3_can.dtsi b/arch/arm/boot/dts/sama5d3_can.dtsi
+index cf06a018ed0f..2470dd3fff25 100644
+--- a/arch/arm/boot/dts/sama5d3_can.dtsi
++++ b/arch/arm/boot/dts/sama5d3_can.dtsi
+@@ -36,13 +36,13 @@
+ can0_clk: can0_clk {
+ #clock-cells = <0>;
+ reg = <40>;
+- atmel,clk-output-range = <0 66000000>;
++ atmel,clk-output-range = <0 83000000>;
+ };
+
+ can1_clk: can1_clk {
+ #clock-cells = <0>;
+ reg = <41>;
+- atmel,clk-output-range = <0 66000000>;
++ atmel,clk-output-range = <0 83000000>;
+ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/sama5d3_tcb1.dtsi b/arch/arm/boot/dts/sama5d3_tcb1.dtsi
+index 1584035daf51..215802b8db30 100644
+--- a/arch/arm/boot/dts/sama5d3_tcb1.dtsi
++++ b/arch/arm/boot/dts/sama5d3_tcb1.dtsi
+@@ -22,6 +22,7 @@
+ tcb1_clk: tcb1_clk {
+ #clock-cells = <0>;
+ reg = <27>;
++ atmel,clk-output-range = <0 166000000>;
+ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/sama5d3_uart.dtsi b/arch/arm/boot/dts/sama5d3_uart.dtsi
+index 4316bdbdc25d..cb62adbd28ed 100644
+--- a/arch/arm/boot/dts/sama5d3_uart.dtsi
++++ b/arch/arm/boot/dts/sama5d3_uart.dtsi
+@@ -41,13 +41,13 @@
+ uart0_clk: uart0_clk {
+ #clock-cells = <0>;
+ reg = <16>;
+- atmel,clk-output-range = <0 66000000>;
++ atmel,clk-output-range = <0 83000000>;
+ };
+
+ uart1_clk: uart1_clk {
+ #clock-cells = <0>;
+ reg = <17>;
+- atmel,clk-output-range = <0 66000000>;
++ atmel,clk-output-range = <0 83000000>;
+ };
+ };
+ };
+diff --git a/arch/arm/crypto/chacha-glue.c b/arch/arm/crypto/chacha-glue.c
+index 6ebbb2b241d2..6fdb0ac62b3d 100644
+--- a/arch/arm/crypto/chacha-glue.c
++++ b/arch/arm/crypto/chacha-glue.c
+@@ -115,7 +115,7 @@ static int chacha_stream_xor(struct skcipher_request *req,
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, walk.stride);
+
+- if (!neon) {
++ if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon) {
+ chacha_doarm(walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes, state, ctx->nrounds);
+ state[12] += DIV_ROUND_UP(nbytes, CHACHA_BLOCK_SIZE);
+@@ -159,7 +159,7 @@ static int do_xchacha(struct skcipher_request *req, bool neon)
+
+ chacha_init_generic(state, ctx->key, req->iv);
+
+- if (!neon) {
++ if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon) {
+ hchacha_block_arm(state, subctx.key, ctx->nrounds);
+ } else {
+ kernel_neon_begin();
+diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
+index d5af6aedc02c..52665f30d236 100644
+--- a/arch/arm/mach-at91/pm.c
++++ b/arch/arm/mach-at91/pm.c
+@@ -691,6 +691,12 @@ static void __init at91_pm_use_default_mode(int pm_mode)
+ soc_pm.data.suspend_mode = AT91_PM_ULP0;
+ }
+
++static const struct of_device_id atmel_shdwc_ids[] = {
++ { .compatible = "atmel,sama5d2-shdwc" },
++ { .compatible = "microchip,sam9x60-shdwc" },
++ { /* sentinel. */ }
++};
++
+ static void __init at91_pm_modes_init(void)
+ {
+ struct device_node *np;
+@@ -700,7 +706,7 @@ static void __init at91_pm_modes_init(void)
+ !at91_is_pm_mode_active(AT91_PM_ULP1))
+ return;
+
+- np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-shdwc");
++ np = of_find_matching_node(NULL, atmel_shdwc_ids);
+ if (!np) {
+ pr_warn("%s: failed to find shdwc!\n", __func__);
+ goto ulp1_default;
+@@ -751,6 +757,7 @@ static const struct of_device_id atmel_pmc_ids[] __initconst = {
+ { .compatible = "atmel,sama5d3-pmc", .data = &pmc_infos[1] },
+ { .compatible = "atmel,sama5d4-pmc", .data = &pmc_infos[1] },
+ { .compatible = "atmel,sama5d2-pmc", .data = &pmc_infos[1] },
++ { .compatible = "microchip,sam9x60-pmc", .data = &pmc_infos[1] },
+ { /* sentinel */ },
+ };
+
+diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
+index 3ef204137e73..054be44d1cdb 100644
+--- a/arch/arm/mm/init.c
++++ b/arch/arm/mm/init.c
+@@ -324,7 +324,7 @@ static inline void poison_init_mem(void *s, size_t count)
+ *p++ = 0xe7fddef0;
+ }
+
+-static inline void
++static inline void __init
+ free_memmap(unsigned long start_pfn, unsigned long end_pfn)
+ {
+ struct page *start_pg, *end_pg;
+diff --git a/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts b/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts
+index bd4aab6092e0..e31813a4f972 100644
+--- a/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts
++++ b/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts
+@@ -143,6 +143,7 @@
+ phy-mode = "sgmii";
+ status = "okay";
+ managed = "in-band-status";
++ phys = <&comphy1 0>;
+ sfp = <&sfp_eth0>;
+ };
+
+@@ -150,11 +151,14 @@
+ phy-mode = "sgmii";
+ status = "okay";
+ managed = "in-band-status";
++ phys = <&comphy0 1>;
+ sfp = <&sfp_eth1>;
+ };
+
+ &usb3 {
+ status = "okay";
++ phys = <&usb2_utmi_otg_phy>;
++ phy-names = "usb2-utmi-otg-phy";
+ };
+
+ &uart0 {
+diff --git a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
+index bd881497b872..a211a046b2f2 100644
+--- a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
++++ b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
+@@ -408,6 +408,8 @@
+ reg = <5>;
+ label = "cpu";
+ ethernet = <&cp1_eth2>;
++ phy-mode = "2500base-x";
++ managed = "in-band-status";
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
+index 5f101a20a20a..e08fcb426bbf 100644
+--- a/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
+@@ -9,6 +9,7 @@
+ / {
+ aliases {
+ serial0 = &blsp2_uart1;
++ serial1 = &blsp1_uart3;
+ };
+
+ chosen {
+diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi
+index fc7838ea9a01..385b46686194 100644
+--- a/arch/arm64/boot/dts/qcom/msm8998.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi
+@@ -987,7 +987,7 @@
+
+ tcsr_mutex_regs: syscon@1f40000 {
+ compatible = "syscon";
+- reg = <0x01f40000 0x20000>;
++ reg = <0x01f40000 0x40000>;
+ };
+
+ tlmm: pinctrl@3400000 {
+diff --git a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
+index b38f9d442fc0..e6d700f8c194 100644
+--- a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
++++ b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
+@@ -636,7 +636,6 @@
+ /* audio_clkout0/1/2/3 */
+ #clock-cells = <1>;
+ clock-frequency = <12288000 11289600>;
+- clkout-lr-synchronous;
+
+ status = "okay";
+
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 04cf64e9f0c9..32eff833a43c 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -32,9 +32,7 @@ static unsigned long elf_hwcap __read_mostly;
+ #define COMPAT_ELF_HWCAP_DEFAULT \
+ (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
+ COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
+- COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
+- COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
+- COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
++ COMPAT_HWCAP_TLS|COMPAT_HWCAP_IDIV|\
+ COMPAT_HWCAP_LPAE)
+ unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
+ unsigned int compat_elf_hwcap2 __read_mostly;
+@@ -1368,7 +1366,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
+ {
+ /* FP/SIMD is not implemented */
+ .capability = ARM64_HAS_NO_FPSIMD,
+- .type = ARM64_CPUCAP_SYSTEM_FEATURE,
++ .type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
+ .min_field_value = 0,
+ .matches = has_no_fpsimd,
+ },
+@@ -1596,6 +1594,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
+ .match_list = list, \
+ }
+
++#define HWCAP_CAP_MATCH(match, cap_type, cap) \
++ { \
++ __HWCAP_CAP(#cap, cap_type, cap) \
++ .matches = match, \
++ }
++
+ #ifdef CONFIG_ARM64_PTR_AUTH
+ static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
+ {
+@@ -1669,8 +1673,35 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
+ {},
+ };
+
++#ifdef CONFIG_COMPAT
++static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope)
++{
++ /*
++ * Check that all of MVFR1_EL1.{SIMDSP, SIMDInt, SIMDLS} are available,
++ * in line with that of arm32 as in vfp_init(). We make sure that the
++ * check is future proof, by making sure value is non-zero.
++ */
++ u32 mvfr1;
++
++ WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
++ if (scope == SCOPE_SYSTEM)
++ mvfr1 = read_sanitised_ftr_reg(SYS_MVFR1_EL1);
++ else
++ mvfr1 = read_sysreg_s(SYS_MVFR1_EL1);
++
++ return cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDSP_SHIFT) &&
++ cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDINT_SHIFT) &&
++ cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDLS_SHIFT);
++}
++#endif
++
+ static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
+ #ifdef CONFIG_COMPAT
++ HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON),
++ HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_SIMDFMAC_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4),
++ /* Arm v8 mandates MVFR0.FPDP == {0, 2}. So, piggy back on this for the presence of VFP support */
++ HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP),
++ HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3),
+ HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
+ HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
+ HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index 7c6a0a41676f..d54d165b286a 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -653,6 +653,7 @@ el0_sync:
+ mov x0, sp
+ bl el0_sync_handler
+ b ret_to_user
++ENDPROC(el0_sync)
+
+ #ifdef CONFIG_COMPAT
+ .align 6
+@@ -661,16 +662,18 @@ el0_sync_compat:
+ mov x0, sp
+ bl el0_sync_compat_handler
+ b ret_to_user
+-ENDPROC(el0_sync)
++ENDPROC(el0_sync_compat)
+
+ .align 6
+ el0_irq_compat:
+ kernel_entry 0, 32
+ b el0_irq_naked
++ENDPROC(el0_irq_compat)
+
+ el0_error_compat:
+ kernel_entry 0, 32
+ b el0_error_naked
++ENDPROC(el0_error_compat)
+ #endif
+
+ .align 6
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index 3eb338f14386..94289d126993 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -269,6 +269,7 @@ static void sve_free(struct task_struct *task)
+ */
+ static void task_fpsimd_load(void)
+ {
++ WARN_ON(!system_supports_fpsimd());
+ WARN_ON(!have_cpu_fpsimd_context());
+
+ if (system_supports_sve() && test_thread_flag(TIF_SVE))
+@@ -289,6 +290,7 @@ static void fpsimd_save(void)
+ this_cpu_ptr(&fpsimd_last_state);
+ /* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
+
++ WARN_ON(!system_supports_fpsimd());
+ WARN_ON(!have_cpu_fpsimd_context());
+
+ if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
+@@ -1092,6 +1094,7 @@ void fpsimd_bind_task_to_cpu(void)
+ struct fpsimd_last_state_struct *last =
+ this_cpu_ptr(&fpsimd_last_state);
+
++ WARN_ON(!system_supports_fpsimd());
+ last->st = &current->thread.uw.fpsimd_state;
+ last->sve_state = current->thread.sve_state;
+ last->sve_vl = current->thread.sve_vl;
+@@ -1114,6 +1117,7 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
+ struct fpsimd_last_state_struct *last =
+ this_cpu_ptr(&fpsimd_last_state);
+
++ WARN_ON(!system_supports_fpsimd());
+ WARN_ON(!in_softirq() && !irqs_disabled());
+
+ last->st = st;
+@@ -1128,8 +1132,19 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
+ */
+ void fpsimd_restore_current_state(void)
+ {
+- if (!system_supports_fpsimd())
++ /*
++ * For the tasks that were created before we detected the absence of
++ * FP/SIMD, the TIF_FOREIGN_FPSTATE could be set via fpsimd_thread_switch(),
++ * e.g, init. This could be then inherited by the children processes.
++ * If we later detect that the system doesn't support FP/SIMD,
++ * we must clear the flag for all the tasks to indicate that the
++ * FPSTATE is clean (as we can't have one) to avoid looping for ever in
++ * do_notify_resume().
++ */
++ if (!system_supports_fpsimd()) {
++ clear_thread_flag(TIF_FOREIGN_FPSTATE);
+ return;
++ }
+
+ get_cpu_fpsimd_context();
+
+@@ -1148,7 +1163,7 @@ void fpsimd_restore_current_state(void)
+ */
+ void fpsimd_update_current_state(struct user_fpsimd_state const *state)
+ {
+- if (!system_supports_fpsimd())
++ if (WARN_ON(!system_supports_fpsimd()))
+ return;
+
+ get_cpu_fpsimd_context();
+@@ -1179,7 +1194,13 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state)
+ void fpsimd_flush_task_state(struct task_struct *t)
+ {
+ t->thread.fpsimd_cpu = NR_CPUS;
+-
++ /*
++ * If we don't support fpsimd, bail out after we have
++ * reset the fpsimd_cpu for this task and clear the
++ * FPSTATE.
++ */
++ if (!system_supports_fpsimd())
++ return;
+ barrier();
+ set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE);
+
+@@ -1193,6 +1214,7 @@ void fpsimd_flush_task_state(struct task_struct *t)
+ */
+ static void fpsimd_flush_cpu_state(void)
+ {
++ WARN_ON(!system_supports_fpsimd());
+ __this_cpu_write(fpsimd_last_state.st, NULL);
+ set_thread_flag(TIF_FOREIGN_FPSTATE);
+ }
+@@ -1203,6 +1225,8 @@ static void fpsimd_flush_cpu_state(void)
+ */
+ void fpsimd_save_and_flush_cpu_state(void)
+ {
++ if (!system_supports_fpsimd())
++ return;
+ WARN_ON(preemptible());
+ __get_cpu_fpsimd_context();
+ fpsimd_save();
+diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
+index 6771c399d40c..cd6e5fa48b9c 100644
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -615,6 +615,13 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
+ return 0;
+ }
+
++static int fpr_active(struct task_struct *target, const struct user_regset *regset)
++{
++ if (!system_supports_fpsimd())
++ return -ENODEV;
++ return regset->n;
++}
++
+ /*
+ * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
+ */
+@@ -637,6 +644,9 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+ {
++ if (!system_supports_fpsimd())
++ return -EINVAL;
++
+ if (target == current)
+ fpsimd_preserve_current_state();
+
+@@ -676,6 +686,9 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
+ {
+ int ret;
+
++ if (!system_supports_fpsimd())
++ return -EINVAL;
++
+ ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
+ if (ret)
+ return ret;
+@@ -1134,6 +1147,7 @@ static const struct user_regset aarch64_regsets[] = {
+ */
+ .size = sizeof(u32),
+ .align = sizeof(u32),
++ .active = fpr_active,
+ .get = fpr_get,
+ .set = fpr_set
+ },
+@@ -1348,6 +1362,9 @@ static int compat_vfp_get(struct task_struct *target,
+ compat_ulong_t fpscr;
+ int ret, vregs_end_pos;
+
++ if (!system_supports_fpsimd())
++ return -EINVAL;
++
+ uregs = &target->thread.uw.fpsimd_state;
+
+ if (target == current)
+@@ -1381,6 +1398,9 @@ static int compat_vfp_set(struct task_struct *target,
+ compat_ulong_t fpscr;
+ int ret, vregs_end_pos;
+
++ if (!system_supports_fpsimd())
++ return -EINVAL;
++
+ uregs = &target->thread.uw.fpsimd_state;
+
+ vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
+@@ -1438,6 +1458,7 @@ static const struct user_regset aarch32_regsets[] = {
+ .n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
+ .size = sizeof(compat_ulong_t),
+ .align = sizeof(compat_ulong_t),
++ .active = fpr_active,
+ .get = compat_vfp_get,
+ .set = compat_vfp_set
+ },
+diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
+index 72fbbd86eb5e..e5816d885761 100644
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -28,7 +28,15 @@
+ /* Check whether the FP regs were dirtied while in the host-side run loop: */
+ static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
+ {
+- if (vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
++ /*
++ * When the system doesn't support FP/SIMD, we cannot rely on
++ * the _TIF_FOREIGN_FPSTATE flag. However, we always inject an
++ * abort on the very first access to FP and thus we should never
++ * see KVM_ARM64_FP_ENABLED. For added safety, make sure we always
++ * trap the accesses.
++ */
++ if (!system_supports_fpsimd() ||
++ vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
+ vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
+ KVM_ARM64_FP_HOST);
+
+diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
+index dab1fea4752a..a4f48c1ac28c 100644
+--- a/arch/arm64/kvm/va_layout.c
++++ b/arch/arm64/kvm/va_layout.c
+@@ -13,52 +13,46 @@
+ #include <asm/kvm_mmu.h>
+
+ /*
+- * The LSB of the random hyp VA tag or 0 if no randomization is used.
++ * The LSB of the HYP VA tag
+ */
+ static u8 tag_lsb;
+ /*
+- * The random hyp VA tag value with the region bit if hyp randomization is used
++ * The HYP VA tag value with the region bit
+ */
+ static u64 tag_val;
+ static u64 va_mask;
+
++/*
++ * We want to generate a hyp VA with the following format (with V ==
++ * vabits_actual):
++ *
++ * 63 ... V | V-1 | V-2 .. tag_lsb | tag_lsb - 1 .. 0
++ * ---------------------------------------------------------
++ * | 0000000 | hyp_va_msb | random tag | kern linear VA |
++ * |--------- tag_val -----------|----- va_mask ---|
++ *
++ * which does not conflict with the idmap regions.
++ */
+ __init void kvm_compute_layout(void)
+ {
+ phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
+ u64 hyp_va_msb;
+- int kva_msb;
+
+ /* Where is my RAM region? */
+ hyp_va_msb = idmap_addr & BIT(vabits_actual - 1);
+ hyp_va_msb ^= BIT(vabits_actual - 1);
+
+- kva_msb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
++ tag_lsb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
+ (u64)(high_memory - 1));
+
+- if (kva_msb == (vabits_actual - 1)) {
+- /*
+- * No space in the address, let's compute the mask so
+- * that it covers (vabits_actual - 1) bits, and the region
+- * bit. The tag stays set to zero.
+- */
+- va_mask = BIT(vabits_actual - 1) - 1;
+- va_mask |= hyp_va_msb;
+- } else {
+- /*
+- * We do have some free bits to insert a random tag.
+- * Hyp VAs are now created from kernel linear map VAs
+- * using the following formula (with V == vabits_actual):
+- *
+- * 63 ... V | V-1 | V-2 .. tag_lsb | tag_lsb - 1 .. 0
+- * ---------------------------------------------------------
+- * | 0000000 | hyp_va_msb | random tag | kern linear VA |
+- */
+- tag_lsb = kva_msb;
+- va_mask = GENMASK_ULL(tag_lsb - 1, 0);
+- tag_val = get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
+- tag_val |= hyp_va_msb;
+- tag_val >>= tag_lsb;
++ va_mask = GENMASK_ULL(tag_lsb - 1, 0);
++ tag_val = hyp_va_msb;
++
++ if (tag_lsb != (vabits_actual - 1)) {
++ /* We have some free bits to insert a random tag. */
++ tag_val |= get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
+ }
++ tag_val >>= tag_lsb;
+ }
+
+ static u32 compute_instruction(int n, u32 rd, u32 rn)
+@@ -117,11 +111,11 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
+ * VHE doesn't need any address translation, let's NOP
+ * everything.
+ *
+- * Alternatively, if we don't have any spare bits in
+- * the address, NOP everything after masking that
+- * kernel VA.
++ * Alternatively, if the tag is zero (because the layout
++ * dictates it and we don't have any spare bits in the
++ * address), NOP everything after masking the kernel VA.
+ */
+- if (has_vhe() || (!tag_lsb && i > 0)) {
++ if (has_vhe() || (!tag_val && i > 0)) {
+ updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
+ continue;
+ }
+diff --git a/arch/mips/loongson64/platform.c b/arch/mips/loongson64/platform.c
+index 13f3404f0030..9674ae1361a8 100644
+--- a/arch/mips/loongson64/platform.c
++++ b/arch/mips/loongson64/platform.c
+@@ -27,6 +27,9 @@ static int __init loongson3_platform_init(void)
+ continue;
+
+ pdev = kzalloc(sizeof(struct platform_device), GFP_KERNEL);
++ if (!pdev)
++ return -ENOMEM;
++
+ pdev->name = loongson_sysconf.sensors[i].name;
+ pdev->id = loongson_sysconf.sensors[i].id;
+ pdev->dev.platform_data = &loongson_sysconf.sensors[i];
+diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
+index 4e1d39847462..0b063830eea8 100644
+--- a/arch/powerpc/Kconfig.debug
++++ b/arch/powerpc/Kconfig.debug
+@@ -371,7 +371,7 @@ config PPC_PTDUMP
+
+ config PPC_DEBUG_WX
+ bool "Warn on W+X mappings at boot"
+- depends on PPC_PTDUMP
++ depends on PPC_PTDUMP && STRICT_KERNEL_RWX
+ help
+ Generate a warning if any W+X mappings are found at boot.
+
+diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
+index 73b84166d06a..5fb90edd865e 100644
+--- a/arch/powerpc/mm/pgtable_32.c
++++ b/arch/powerpc/mm/pgtable_32.c
+@@ -218,6 +218,7 @@ void mark_rodata_ro(void)
+
+ if (v_block_mapped((unsigned long)_sinittext)) {
+ mmu_mark_rodata_ro();
++ ptdump_check_wx();
+ return;
+ }
+
+diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
+index 6ba081dd61c9..b4ce9d472dfe 100644
+--- a/arch/powerpc/platforms/pseries/iommu.c
++++ b/arch/powerpc/platforms/pseries/iommu.c
+@@ -36,7 +36,6 @@
+ #include <asm/udbg.h>
+ #include <asm/mmzone.h>
+ #include <asm/plpar_wrappers.h>
+-#include <asm/svm.h>
+
+ #include "pseries.h"
+
+@@ -133,10 +132,10 @@ static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
+ return be64_to_cpu(*tcep);
+ }
+
+-static void tce_free_pSeriesLP(struct iommu_table*, long, long);
++static void tce_free_pSeriesLP(unsigned long liobn, long, long);
+ static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);
+
+-static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
++static int tce_build_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
+ long npages, unsigned long uaddr,
+ enum dma_data_direction direction,
+ unsigned long attrs)
+@@ -147,25 +146,25 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
+ int ret = 0;
+ long tcenum_start = tcenum, npages_start = npages;
+
+- rpn = __pa(uaddr) >> TCE_SHIFT;
++ rpn = __pa(uaddr) >> tceshift;
+ proto_tce = TCE_PCI_READ;
+ if (direction != DMA_TO_DEVICE)
+ proto_tce |= TCE_PCI_WRITE;
+
+ while (npages--) {
+- tce = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
+- rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, tce);
++ tce = proto_tce | (rpn & TCE_RPN_MASK) << tceshift;
++ rc = plpar_tce_put((u64)liobn, (u64)tcenum << tceshift, tce);
+
+ if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
+ ret = (int)rc;
+- tce_free_pSeriesLP(tbl, tcenum_start,
++ tce_free_pSeriesLP(liobn, tcenum_start,
+ (npages_start - (npages + 1)));
+ break;
+ }
+
+ if (rc && printk_ratelimit()) {
+ printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
+- printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
++ printk("\tindex = 0x%llx\n", (u64)liobn);
+ printk("\ttcenum = 0x%llx\n", (u64)tcenum);
+ printk("\ttce val = 0x%llx\n", tce );
+ dump_stack();
+@@ -194,7 +193,8 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
+ unsigned long flags;
+
+ if ((npages == 1) || !firmware_has_feature(FW_FEATURE_MULTITCE)) {
+- return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
++ return tce_build_pSeriesLP(tbl->it_index, tcenum,
++ tbl->it_page_shift, npages, uaddr,
+ direction, attrs);
+ }
+
+@@ -210,8 +210,9 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
+ /* If allocation fails, fall back to the loop implementation */
+ if (!tcep) {
+ local_irq_restore(flags);
+- return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
+- direction, attrs);
++ return tce_build_pSeriesLP(tbl->it_index, tcenum,
++ tbl->it_page_shift,
++ npages, uaddr, direction, attrs);
+ }
+ __this_cpu_write(tce_page, tcep);
+ }
+@@ -262,16 +263,16 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
+ return ret;
+ }
+
+-static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
++static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long npages)
+ {
+ u64 rc;
+
+ while (npages--) {
+- rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0);
++ rc = plpar_tce_put((u64)liobn, (u64)tcenum << 12, 0);
+
+ if (rc && printk_ratelimit()) {
+ printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
+- printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
++ printk("\tindex = 0x%llx\n", (u64)liobn);
+ printk("\ttcenum = 0x%llx\n", (u64)tcenum);
+ dump_stack();
+ }
+@@ -286,7 +287,7 @@ static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long n
+ u64 rc;
+
+ if (!firmware_has_feature(FW_FEATURE_MULTITCE))
+- return tce_free_pSeriesLP(tbl, tcenum, npages);
++ return tce_free_pSeriesLP(tbl->it_index, tcenum, npages);
+
+ rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);
+
+@@ -401,6 +402,19 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
+ u64 rc = 0;
+ long l, limit;
+
++ if (!firmware_has_feature(FW_FEATURE_MULTITCE)) {
++ unsigned long tceshift = be32_to_cpu(maprange->tce_shift);
++ unsigned long dmastart = (start_pfn << PAGE_SHIFT) +
++ be64_to_cpu(maprange->dma_base);
++ unsigned long tcenum = dmastart >> tceshift;
++ unsigned long npages = num_pfn << PAGE_SHIFT >> tceshift;
++ void *uaddr = __va(start_pfn << PAGE_SHIFT);
++
++ return tce_build_pSeriesLP(be32_to_cpu(maprange->liobn),
++ tcenum, tceshift, npages, (unsigned long) uaddr,
++ DMA_BIDIRECTIONAL, 0);
++ }
++
+ local_irq_disable(); /* to protect tcep and the page behind it */
+ tcep = __this_cpu_read(tce_page);
+
+@@ -1320,15 +1334,7 @@ void iommu_init_early_pSeries(void)
+ of_reconfig_notifier_register(&iommu_reconfig_nb);
+ register_memory_notifier(&iommu_mem_nb);
+
+- /*
+- * Secure guest memory is inacessible to devices so regular DMA isn't
+- * possible.
+- *
+- * In that case keep devices' dma_map_ops as NULL so that the generic
+- * DMA code path will use SWIOTLB to bounce buffers for DMA.
+- */
+- if (!is_secure_guest())
+- set_pci_dma_ops(&dma_iommu_ops);
++ set_pci_dma_ops(&dma_iommu_ops);
+ }
+
+ static int __init disable_multitce(char *str)
+diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
+index c2ef320ba1bf..eb420655ed0b 100644
+--- a/arch/powerpc/platforms/pseries/papr_scm.c
++++ b/arch/powerpc/platforms/pseries/papr_scm.c
+@@ -322,6 +322,7 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
+ p->bus = nvdimm_bus_register(NULL, &p->bus_desc);
+ if (!p->bus) {
+ dev_err(dev, "Error creating nvdimm bus %pOF\n", p->dn);
++ kfree(p->bus_desc.provider_name);
+ return -ENXIO;
+ }
+
+@@ -477,6 +478,7 @@ static int papr_scm_remove(struct platform_device *pdev)
+
+ nvdimm_bus_unregister(p->bus);
+ drc_pmem_unbind(p);
++ kfree(p->bus_desc.provider_name);
+ kfree(p);
+
+ return 0;
+diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
+index 79e2287991db..f682b7babc09 100644
+--- a/arch/powerpc/platforms/pseries/vio.c
++++ b/arch/powerpc/platforms/pseries/vio.c
+@@ -1176,6 +1176,8 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
+ if (tbl == NULL)
+ return NULL;
+
++ kref_init(&tbl->it_kref);
++
+ of_parse_dma_window(dev->dev.of_node, dma_window,
+ &tbl->it_index, &offset, &size);
+
+diff --git a/arch/x86/boot/compressed/acpi.c b/arch/x86/boot/compressed/acpi.c
+index 25019d42ae93..ef2ad7253cd5 100644
+--- a/arch/x86/boot/compressed/acpi.c
++++ b/arch/x86/boot/compressed/acpi.c
+@@ -393,7 +393,13 @@ int count_immovable_mem_regions(void)
+ table = table_addr + sizeof(struct acpi_table_srat);
+
+ while (table + sizeof(struct acpi_subtable_header) < table_end) {
++
+ sub_table = (struct acpi_subtable_header *)table;
++ if (!sub_table->length) {
++ debug_putstr("Invalid zero length SRAT subtable.\n");
++ return 0;
++ }
++
+ if (sub_table->type == ACPI_SRAT_TYPE_MEMORY_AFFINITY) {
+ struct acpi_srat_mem_affinity *ma;
+
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index 9ec463fe96f2..2f1e2333bd0a 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -23,6 +23,7 @@
+ #include <asm/nmi.h>
+ #include <asm/cacheflush.h>
+ #include <asm/tlbflush.h>
++#include <asm/insn.h>
+ #include <asm/io.h>
+ #include <asm/fixmap.h>
+
+diff --git a/crypto/testmgr.c b/crypto/testmgr.c
+index 82513b6b0abd..2c96963b2e51 100644
+--- a/crypto/testmgr.c
++++ b/crypto/testmgr.c
+@@ -2102,6 +2102,7 @@ static void generate_random_aead_testvec(struct aead_request *req,
+ * If the key or authentication tag size couldn't be set, no need to
+ * continue to encrypt.
+ */
++ vec->crypt_error = 0;
+ if (vec->setkey_error || vec->setauthsize_error)
+ goto done;
+
+@@ -2245,10 +2246,12 @@ static int test_aead_vs_generic_impl(const char *driver,
+ req, tsgls);
+ if (err)
+ goto out;
+- err = test_aead_vec_cfg(driver, DECRYPT, &vec, vec_name, cfg,
+- req, tsgls);
+- if (err)
+- goto out;
++ if (vec.crypt_error == 0) {
++ err = test_aead_vec_cfg(driver, DECRYPT, &vec, vec_name,
++ cfg, req, tsgls);
++ if (err)
++ goto out;
++ }
+ cond_resched();
+ }
+ err = 0;
+@@ -2678,6 +2681,15 @@ static void generate_random_cipher_testvec(struct skcipher_request *req,
+ skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
+ skcipher_request_set_crypt(req, &src, &dst, vec->len, iv);
+ vec->crypt_error = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
++ if (vec->crypt_error != 0) {
++ /*
++ * The only acceptable error here is for an invalid length, so
++ * skcipher decryption should fail with the same error too.
++ * We'll test for this. But to keep the API usage well-defined,
++ * explicitly initialize the ciphertext buffer too.
++ */
++ memset((u8 *)vec->ctext, 0, vec->len);
++ }
+ done:
+ snprintf(name, max_namelen, "\"random: len=%u klen=%u\"",
+ vec->len, vec->klen);
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index 19f57ccfbe1d..59f911e57719 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -1488,11 +1488,18 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
+
+ WARN_ON(!map->bus);
+
+- /* Check for unwritable registers before we start */
+- for (i = 0; i < val_len / map->format.val_bytes; i++)
+- if (!regmap_writeable(map,
+- reg + regmap_get_offset(map, i)))
+- return -EINVAL;
++ /* Check for unwritable or noinc registers in range
++ * before we start
++ */
++ if (!regmap_writeable_noinc(map, reg)) {
++ for (i = 0; i < val_len / map->format.val_bytes; i++) {
++ unsigned int element =
++ reg + regmap_get_offset(map, i);
++ if (!regmap_writeable(map, element) ||
++ regmap_writeable_noinc(map, element))
++ return -EINVAL;
++ }
++ }
+
+ if (!map->cache_bypass && map->format.parse_val) {
+ unsigned int ival;
+diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
+index b3af61cc6fb9..d2760a021301 100644
+--- a/drivers/clk/meson/g12a.c
++++ b/drivers/clk/meson/g12a.c
+@@ -4692,6 +4692,7 @@ static struct clk_regmap *const g12a_clk_regmaps[] = {
+ &g12a_bt656,
+ &g12a_usb1_to_ddr,
+ &g12a_mmc_pclk,
++ &g12a_uart2,
+ &g12a_vpu_intr,
+ &g12a_gic,
+ &g12a_sd_emmc_a_clk0,
+diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
+index 8ea0e4bcde0d..7394671f815b 100644
+--- a/drivers/crypto/atmel-sha.c
++++ b/drivers/crypto/atmel-sha.c
+@@ -1918,12 +1918,7 @@ static int atmel_sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+ {
+ struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
+
+- if (atmel_sha_hmac_key_set(&hmac->hkey, key, keylen)) {
+- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+- return -EINVAL;
+- }
+-
+- return 0;
++ return atmel_sha_hmac_key_set(&hmac->hkey, key, keylen);
+ }
+
+ static int atmel_sha_hmac_init(struct ahash_request *req)
+diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
+index 4b20606983a4..22ebe40f09f5 100644
+--- a/drivers/crypto/axis/artpec6_crypto.c
++++ b/drivers/crypto/axis/artpec6_crypto.c
+@@ -1251,7 +1251,7 @@ static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key,
+
+ if (len != 16 && len != 24 && len != 32) {
+ crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+- return -1;
++ return -EINVAL;
+ }
+
+ ctx->key_length = len;
+diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
+index 3443f6d6dd83..6863d7097674 100644
+--- a/drivers/crypto/caam/caamalg_qi2.c
++++ b/drivers/crypto/caam/caamalg_qi2.c
+@@ -2481,7 +2481,7 @@ static struct caam_aead_alg driver_aeads[] = {
+ .cra_name = "echainiv(authenc(hmac(sha256),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+- "hmac-sha256-cbc-desi-"
++ "hmac-sha256-cbc-des-"
+ "caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
+index a0ee404b736e..f1d149e32839 100644
+--- a/drivers/dma/dma-axi-dmac.c
++++ b/drivers/dma/dma-axi-dmac.c
+@@ -830,6 +830,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
+ struct dma_device *dma_dev;
+ struct axi_dmac *dmac;
+ struct resource *res;
++ struct regmap *regmap;
+ int ret;
+
+ dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
+@@ -921,10 +922,17 @@ static int axi_dmac_probe(struct platform_device *pdev)
+
+ platform_set_drvdata(pdev, dmac);
+
+- devm_regmap_init_mmio(&pdev->dev, dmac->base, &axi_dmac_regmap_config);
++ regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base,
++ &axi_dmac_regmap_config);
++ if (IS_ERR(regmap)) {
++ ret = PTR_ERR(regmap);
++ goto err_free_irq;
++ }
+
+ return 0;
+
++err_free_irq:
++ free_irq(dmac->irq, dmac);
+ err_unregister_of:
+ of_dma_controller_free(pdev->dev.of_node);
+ err_unregister_device:
+diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
+index 958161c71985..790ea3fda693 100644
+--- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c
++++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
+@@ -273,6 +273,7 @@ static int ec_i2c_probe(struct platform_device *pdev)
+ bus->adap.dev.parent = &pdev->dev;
+ bus->adap.dev.of_node = pdev->dev.of_node;
+ bus->adap.retries = I2C_MAX_RETRIES;
++ ACPI_COMPANION_SET(&bus->adap.dev, ACPI_COMPANION(&pdev->dev));
+
+ err = i2c_add_adapter(&bus->adap);
+ if (err)
+@@ -298,7 +299,7 @@ static const struct of_device_id cros_ec_i2c_of_match[] = {
+ MODULE_DEVICE_TABLE(of, cros_ec_i2c_of_match);
+
+ static const struct acpi_device_id cros_ec_i2c_tunnel_acpi_id[] = {
+- { "GOOG001A", 0 },
++ { "GOOG0012", 0 },
+ { }
+ };
+ MODULE_DEVICE_TABLE(acpi, cros_ec_i2c_tunnel_acpi_id);
+diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
+index 606fa6d86685..1753a9801b70 100644
+--- a/drivers/infiniband/core/addr.c
++++ b/drivers/infiniband/core/addr.c
+@@ -139,7 +139,7 @@ int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
+ if (ib_nl_is_good_ip_resp(nlh))
+ ib_nl_process_good_ip_rsep(nlh);
+
+- return skb->len;
++ return 0;
+ }
+
+ static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr,
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 43a6f07e0afe..af1afc17b8bd 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -3118,6 +3118,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
+ rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
+ rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
+
++ atomic_inc(&id_priv->refcount);
+ cma_init_resolve_addr_work(work, id_priv);
+ queue_work(cma_wq, &work->work);
+ return 0;
+@@ -3144,6 +3145,7 @@ static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
+ rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
+ &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));
+
++ atomic_inc(&id_priv->refcount);
+ cma_init_resolve_addr_work(work, id_priv);
+ queue_work(cma_wq, &work->work);
+ return 0;
+diff --git a/drivers/infiniband/core/ib_core_uverbs.c b/drivers/infiniband/core/ib_core_uverbs.c
+index b7cb59844ece..b51bd7087a88 100644
+--- a/drivers/infiniband/core/ib_core_uverbs.c
++++ b/drivers/infiniband/core/ib_core_uverbs.c
+@@ -232,7 +232,9 @@ void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry)
+ if (!entry)
+ return;
+
++ xa_lock(&entry->ucontext->mmap_xa);
+ entry->driver_removed = true;
++ xa_unlock(&entry->ucontext->mmap_xa);
+ kref_put(&entry->ref, rdma_user_mmap_entry_free);
+ }
+ EXPORT_SYMBOL(rdma_user_mmap_entry_remove);
+diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
+index 8917125ea16d..30d4c126a2db 100644
+--- a/drivers/infiniband/core/sa_query.c
++++ b/drivers/infiniband/core/sa_query.c
+@@ -1068,7 +1068,7 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
+ }
+
+ settimeout_out:
+- return skb->len;
++ return 0;
+ }
+
+ static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
+@@ -1139,7 +1139,7 @@ int ib_nl_handle_resolve_resp(struct sk_buff *skb,
+ }
+
+ resp_out:
+- return skb->len;
++ return 0;
+ }
+
+ static void free_sm_ah(struct kref *kref)
+diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
+index 7a3b99597ead..40cadb889114 100644
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -166,10 +166,13 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
+ * for any address.
+ */
+ mask |= (sg_dma_address(sg) + pgoff) ^ va;
+- if (i && i != (umem->nmap - 1))
+- /* restrict by length as well for interior SGEs */
+- mask |= sg_dma_len(sg);
+ va += sg_dma_len(sg) - pgoff;
++ /* Except for the last entry, the ending iova alignment sets
++ * the maximum possible page size as the low bits of the iova
++ * must be zero when starting the next chunk.
++ */
++ if (i != (umem->nmap - 1))
++ mask |= va;
+ pgoff = 0;
+ }
+ best_pg_bit = rdma_find_pg_bit(mask, pgsz_bitmap);
+diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
+index 970d8e31dd65..8f5de4dcad97 100644
+--- a/drivers/infiniband/core/uverbs_main.c
++++ b/drivers/infiniband/core/uverbs_main.c
+@@ -220,7 +220,6 @@ void ib_uverbs_release_file(struct kref *ref)
+ }
+
+ static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
+- struct ib_uverbs_file *uverbs_file,
+ struct file *filp, char __user *buf,
+ size_t count, loff_t *pos,
+ size_t eventsz)
+@@ -238,19 +237,16 @@ static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
+
+ if (wait_event_interruptible(ev_queue->poll_wait,
+ (!list_empty(&ev_queue->event_list) ||
+- /* The barriers built into wait_event_interruptible()
+- * and wake_up() guarentee this will see the null set
+- * without using RCU
+- */
+- !uverbs_file->device->ib_dev)))
++ ev_queue->is_closed)))
+ return -ERESTARTSYS;
+
++ spin_lock_irq(&ev_queue->lock);
++
+ /* If device was disassociated and no event exists set an error */
+- if (list_empty(&ev_queue->event_list) &&
+- !uverbs_file->device->ib_dev)
++ if (list_empty(&ev_queue->event_list) && ev_queue->is_closed) {
++ spin_unlock_irq(&ev_queue->lock);
+ return -EIO;
+-
+- spin_lock_irq(&ev_queue->lock);
++ }
+ }
+
+ event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list);
+@@ -285,8 +281,7 @@ static ssize_t ib_uverbs_async_event_read(struct file *filp, char __user *buf,
+ {
+ struct ib_uverbs_async_event_file *file = filp->private_data;
+
+- return ib_uverbs_event_read(&file->ev_queue, file->uverbs_file, filp,
+- buf, count, pos,
++ return ib_uverbs_event_read(&file->ev_queue, filp, buf, count, pos,
+ sizeof(struct ib_uverbs_async_event_desc));
+ }
+
+@@ -296,9 +291,8 @@ static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf,
+ struct ib_uverbs_completion_event_file *comp_ev_file =
+ filp->private_data;
+
+- return ib_uverbs_event_read(&comp_ev_file->ev_queue,
+- comp_ev_file->uobj.ufile, filp,
+- buf, count, pos,
++ return ib_uverbs_event_read(&comp_ev_file->ev_queue, filp, buf, count,
++ pos,
+ sizeof(struct ib_uverbs_comp_event_desc));
+ }
+
+@@ -321,7 +315,9 @@ static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
+ static __poll_t ib_uverbs_async_event_poll(struct file *filp,
+ struct poll_table_struct *wait)
+ {
+- return ib_uverbs_event_poll(filp->private_data, filp, wait);
++ struct ib_uverbs_async_event_file *file = filp->private_data;
++
++ return ib_uverbs_event_poll(&file->ev_queue, filp, wait);
+ }
+
+ static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
+@@ -335,9 +331,9 @@ static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
+
+ static int ib_uverbs_async_event_fasync(int fd, struct file *filp, int on)
+ {
+- struct ib_uverbs_event_queue *ev_queue = filp->private_data;
++ struct ib_uverbs_async_event_file *file = filp->private_data;
+
+- return fasync_helper(fd, filp, on, &ev_queue->async_queue);
++ return fasync_helper(fd, filp, on, &file->ev_queue.async_queue);
+ }
+
+ static int ib_uverbs_comp_event_fasync(int fd, struct file *filp, int on)
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
+index d44cf33df81a..238614370927 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
+@@ -1225,6 +1225,8 @@ static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev)
+ const struct in_ifaddr *ifa;
+
+ idev = in_dev_get(dev);
++ if (!idev)
++ continue;
+ in_dev_for_each_ifa_rtnl(ifa, idev) {
+ i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
+ "IP=%pI4, vlan_id=%d, MAC=%pM\n", &ifa->ifa_address,
+diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
+index ecd6cadd529a..b591861934b3 100644
+--- a/drivers/infiniband/hw/mlx4/cm.c
++++ b/drivers/infiniband/hw/mlx4/cm.c
+@@ -186,23 +186,6 @@ out:
+ kfree(ent);
+ }
+
+-static void id_map_find_del(struct ib_device *ibdev, int pv_cm_id)
+-{
+- struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
+- struct rb_root *sl_id_map = &sriov->sl_id_map;
+- struct id_map_entry *ent, *found_ent;
+-
+- spin_lock(&sriov->id_map_lock);
+- ent = xa_erase(&sriov->pv_id_table, pv_cm_id);
+- if (!ent)
+- goto out;
+- found_ent = id_map_find_by_sl_id(ibdev, ent->slave_id, ent->sl_cm_id);
+- if (found_ent && found_ent == ent)
+- rb_erase(&found_ent->node, sl_id_map);
+-out:
+- spin_unlock(&sriov->id_map_lock);
+-}
+-
+ static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
+ {
+ struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
+@@ -294,7 +277,7 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
+ spin_lock(&sriov->id_map_lock);
+ spin_lock_irqsave(&sriov->going_down_lock, flags);
+ /*make sure that there is no schedule inside the scheduled work.*/
+- if (!sriov->is_going_down) {
++ if (!sriov->is_going_down && !id->scheduled_delete) {
+ id->scheduled_delete = 1;
+ schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
+ }
+@@ -341,9 +324,6 @@ cont:
+
+ if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
+ schedule_delayed(ibdev, id);
+- else if (mad->mad_hdr.attr_id == CM_DREP_ATTR_ID)
+- id_map_find_del(ibdev, pv_cm_id);
+-
+ return 0;
+ }
+
+@@ -382,12 +362,9 @@ int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
+ *slave = id->slave_id;
+ set_remote_comm_id(mad, id->sl_cm_id);
+
+- if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
++ if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID ||
++ mad->mad_hdr.attr_id == CM_REJ_ATTR_ID)
+ schedule_delayed(ibdev, id);
+- else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
+- mad->mad_hdr.attr_id == CM_DREP_ATTR_ID) {
+- id_map_find_del(ibdev, (int) pv_cm_id);
+- }
+
+ return 0;
+ }
+diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
+index 34055cbab38c..2f5d9b181848 100644
+--- a/drivers/infiniband/hw/mlx4/main.c
++++ b/drivers/infiniband/hw/mlx4/main.c
+@@ -246,6 +246,13 @@ static int mlx4_ib_update_gids(struct gid_entry *gids,
+ return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
+ }
+
++static void free_gid_entry(struct gid_entry *entry)
++{
++ memset(&entry->gid, 0, sizeof(entry->gid));
++ kfree(entry->ctx);
++ entry->ctx = NULL;
++}
++
+ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
+ {
+ struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
+@@ -313,6 +320,8 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
+ GFP_ATOMIC);
+ if (!gids) {
+ ret = -ENOMEM;
++ *context = NULL;
++ free_gid_entry(&port_gid_table->gids[free]);
+ } else {
+ for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
+ memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
+@@ -324,6 +333,12 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
+
+ if (!ret && hw_update) {
+ ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
++ if (ret) {
++ spin_lock_bh(&iboe->lock);
++ *context = NULL;
++ free_gid_entry(&port_gid_table->gids[free]);
++ spin_unlock_bh(&iboe->lock);
++ }
+ kfree(gids);
+ }
+
+@@ -353,10 +368,7 @@ static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
+ if (!ctx->refcount) {
+ unsigned int real_index = ctx->real_index;
+
+- memset(&port_gid_table->gids[real_index].gid, 0,
+- sizeof(port_gid_table->gids[real_index].gid));
+- kfree(port_gid_table->gids[real_index].ctx);
+- port_gid_table->gids[real_index].ctx = NULL;
++ free_gid_entry(&port_gid_table->gids[real_index]);
+ hw_update = 1;
+ }
+ }
+diff --git a/drivers/infiniband/hw/mlx5/ib_virt.c b/drivers/infiniband/hw/mlx5/ib_virt.c
+index 4f0edd4832bd..b61165359954 100644
+--- a/drivers/infiniband/hw/mlx5/ib_virt.c
++++ b/drivers/infiniband/hw/mlx5/ib_virt.c
+@@ -164,8 +164,10 @@ static int set_vf_node_guid(struct ib_device *device, int vf, u8 port, u64 guid)
+ in->field_select = MLX5_HCA_VPORT_SEL_NODE_GUID;
+ in->node_guid = guid;
+ err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
+- if (!err)
++ if (!err) {
+ vfs_ctx[vf].node_guid = guid;
++ vfs_ctx[vf].node_guid_valid = 1;
++ }
+ kfree(in);
+ return err;
+ }
+@@ -185,8 +187,10 @@ static int set_vf_port_guid(struct ib_device *device, int vf, u8 port, u64 guid)
+ in->field_select = MLX5_HCA_VPORT_SEL_PORT_GUID;
+ in->port_guid = guid;
+ err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
+- if (!err)
++ if (!err) {
+ vfs_ctx[vf].port_guid = guid;
++ vfs_ctx[vf].port_guid_valid = 1;
++ }
+ kfree(in);
+ return err;
+ }
+@@ -208,20 +212,12 @@ int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
+ {
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+- struct mlx5_hca_vport_context *rep;
+- int err;
+-
+- rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+- if (!rep)
+- return -ENOMEM;
++ struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
+
+- err = mlx5_query_hca_vport_context(mdev, 1, 1, vf+1, rep);
+- if (err)
+- goto ex;
++ node_guid->guid =
++ vfs_ctx[vf].node_guid_valid ? vfs_ctx[vf].node_guid : 0;
++ port_guid->guid =
++ vfs_ctx[vf].port_guid_valid ? vfs_ctx[vf].port_guid : 0;
+
+- port_guid->guid = rep->port_guid;
+- node_guid->guid = rep->node_guid;
+-ex:
+- kfree(rep);
+- return err;
++ return 0;
+ }
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index ea8bfc3e2d8d..23c4529edf54 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -1247,6 +1247,8 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && !start &&
+ length == U64_MAX) {
++ if (virt_addr != start)
++ return ERR_PTR(-EINVAL);
+ if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
+ !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
+ return ERR_PTR(-EINVAL);
+diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
+index f924250f80c2..8247c26a1ce9 100644
+--- a/drivers/infiniband/hw/mlx5/odp.c
++++ b/drivers/infiniband/hw/mlx5/odp.c
+@@ -624,11 +624,10 @@ static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
+ bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
+ unsigned long current_seq;
+ u64 access_mask;
+- u64 start_idx, page_mask;
++ u64 start_idx;
+
+ page_shift = odp->page_shift;
+- page_mask = ~(BIT(page_shift) - 1);
+- start_idx = (user_va - (mr->mmkey.iova & page_mask)) >> page_shift;
++ start_idx = (user_va - ib_umem_start(odp)) >> page_shift;
+ access_mask = ODP_READ_ALLOWED_BIT;
+
+ if (odp->umem.writable && !downgrade)
+@@ -767,11 +766,19 @@ static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
+ {
+ struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+
++ if (unlikely(io_virt < mr->mmkey.iova))
++ return -EFAULT;
++
+ if (!odp->is_implicit_odp) {
+- if (unlikely(io_virt < ib_umem_start(odp) ||
+- ib_umem_end(odp) - io_virt < bcnt))
++ u64 user_va;
++
++ if (check_add_overflow(io_virt - mr->mmkey.iova,
++ (u64)odp->umem.address, &user_va))
++ return -EFAULT;
++ if (unlikely(user_va >= ib_umem_end(odp) ||
++ ib_umem_end(odp) - user_va < bcnt))
+ return -EFAULT;
+- return pagefault_real_mr(mr, odp, io_virt, bcnt, bytes_mapped,
++ return pagefault_real_mr(mr, odp, user_va, bcnt, bytes_mapped,
+ flags);
+ }
+ return pagefault_implicit_mr(mr, odp, io_virt, bcnt, bytes_mapped,
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index b7f7a5f7bd98..cd1181c39ed2 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -2546,7 +2546,8 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
+ if (lrsp->opcode == SRP_LOGIN_RSP) {
+ ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
+ ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
+- ch->use_imm_data = lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP;
++ ch->use_imm_data = srp_use_imm_data &&
++ (lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP);
+ ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
+ ch->use_imm_data,
+ target->max_it_iu_size);
+diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
+index effe72eb89e7..2f7680faba49 100644
+--- a/drivers/iommu/arm-smmu-v3.c
++++ b/drivers/iommu/arm-smmu-v3.c
+@@ -856,6 +856,7 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
+ cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_RANGE, 31);
+ break;
+ case CMDQ_OP_TLBI_NH_VA:
++ cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
+ cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
+ cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
+ cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
+diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
+index be2a2a201603..33ddc5269e8d 100644
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -417,10 +417,14 @@ err:
+
+ /* Journalling */
+
++#define nr_to_fifo_front(p, front_p, mask) (((p) - (front_p)) & (mask))
++
+ static void btree_flush_write(struct cache_set *c)
+ {
+ struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
+- unsigned int i, n;
++ unsigned int i, nr, ref_nr;
++ atomic_t *fifo_front_p, *now_fifo_front_p;
++ size_t mask;
+
+ if (c->journal.btree_flushing)
+ return;
+@@ -433,12 +437,50 @@ static void btree_flush_write(struct cache_set *c)
+ c->journal.btree_flushing = true;
+ spin_unlock(&c->journal.flush_write_lock);
+
++ /* get the oldest journal entry and check its refcount */
++ spin_lock(&c->journal.lock);
++ fifo_front_p = &fifo_front(&c->journal.pin);
++ ref_nr = atomic_read(fifo_front_p);
++ if (ref_nr <= 0) {
++ /*
++ * do nothing if no btree node references
++ * the oldest journal entry
++ */
++ spin_unlock(&c->journal.lock);
++ goto out;
++ }
++ spin_unlock(&c->journal.lock);
++
++ mask = c->journal.pin.mask;
++ nr = 0;
+ atomic_long_inc(&c->flush_write);
+ memset(btree_nodes, 0, sizeof(btree_nodes));
+- n = 0;
+
+ mutex_lock(&c->bucket_lock);
+ list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
++ /*
++ * It is safe to get now_fifo_front_p without holding
++ * c->journal.lock here, because we don't need to know
++		 * the exact value; we only check whether the
++		 * front pointer of c->journal.pin has changed.
++ */
++ now_fifo_front_p = &fifo_front(&c->journal.pin);
++ /*
++		 * If the oldest journal entry is reclaimed and the front
++		 * pointer of c->journal.pin changes, there is no need to
++		 * scan c->btree_cache any further; just quit the loop and
++		 * flush out what we have already.
++ */
++ if (now_fifo_front_p != fifo_front_p)
++ break;
++ /*
++ * quit this loop if all matching btree nodes are
++		 * scanned and recorded in btree_nodes[] already.
++ */
++ ref_nr = atomic_read(fifo_front_p);
++ if (nr >= ref_nr)
++ break;
++
+ if (btree_node_journal_flush(b))
+ pr_err("BUG: flush_write bit should not be set here!");
+
+@@ -454,17 +496,44 @@ static void btree_flush_write(struct cache_set *c)
+ continue;
+ }
+
++ /*
++ * Only select the btree node which exactly references
++ * the oldest journal entry.
++ *
++		 * If the journal entry pointed to by fifo_front_p is
++ * reclaimed in parallel, don't worry:
++ * - the list_for_each_xxx loop will quit when checking
++ * next now_fifo_front_p.
++ * - If there are matched nodes recorded in btree_nodes[],
++ * they are clean now (this is why and how the oldest
++ * journal entry can be reclaimed). These selected nodes
++		 *   will be ignored and skipped in the following for-loop.
++ */
++ if (nr_to_fifo_front(btree_current_write(b)->journal,
++ fifo_front_p,
++ mask) != 0) {
++ mutex_unlock(&b->write_lock);
++ continue;
++ }
++
+ set_btree_node_journal_flush(b);
+
+ mutex_unlock(&b->write_lock);
+
+- btree_nodes[n++] = b;
+- if (n == BTREE_FLUSH_NR)
++ btree_nodes[nr++] = b;
++ /*
++		 * To avoid holding c->bucket_lock for too long, scan
++		 * at most BTREE_FLUSH_NR matched btree nodes. If more
++		 * btree nodes reference the oldest journal entry, try
++		 * to flush them the next time btree_flush_write() is
++		 * called.
++ */
++ if (nr == BTREE_FLUSH_NR)
+ break;
+ }
+ mutex_unlock(&c->bucket_lock);
+
+- for (i = 0; i < n; i++) {
++ for (i = 0; i < nr; i++) {
+ b = btree_nodes[i];
+ if (!b) {
+ pr_err("BUG: btree_nodes[%d] is NULL", i);
+@@ -497,6 +566,7 @@ static void btree_flush_write(struct cache_set *c)
+ mutex_unlock(&b->write_lock);
+ }
+
++out:
+ spin_lock(&c->journal.flush_write_lock);
+ c->journal.btree_flushing = false;
+ spin_unlock(&c->journal.flush_write_lock);
+diff --git a/drivers/media/i2c/adv748x/adv748x.h b/drivers/media/i2c/adv748x/adv748x.h
+index 5042f9e94aee..fccb388ce179 100644
+--- a/drivers/media/i2c/adv748x/adv748x.h
++++ b/drivers/media/i2c/adv748x/adv748x.h
+@@ -394,10 +394,10 @@ int adv748x_write_block(struct adv748x_state *state, int client_page,
+
+ #define io_read(s, r) adv748x_read(s, ADV748X_PAGE_IO, r)
+ #define io_write(s, r, v) adv748x_write(s, ADV748X_PAGE_IO, r, v)
+-#define io_clrset(s, r, m, v) io_write(s, r, (io_read(s, r) & ~m) | v)
++#define io_clrset(s, r, m, v) io_write(s, r, (io_read(s, r) & ~(m)) | (v))
+
+ #define hdmi_read(s, r) adv748x_read(s, ADV748X_PAGE_HDMI, r)
+-#define hdmi_read16(s, r, m) (((hdmi_read(s, r) << 8) | hdmi_read(s, r+1)) & m)
++#define hdmi_read16(s, r, m) (((hdmi_read(s, r) << 8) | hdmi_read(s, (r)+1)) & (m))
+ #define hdmi_write(s, r, v) adv748x_write(s, ADV748X_PAGE_HDMI, r, v)
+
+ #define repeater_read(s, r) adv748x_read(s, ADV748X_PAGE_REPEATER, r)
+@@ -405,11 +405,11 @@ int adv748x_write_block(struct adv748x_state *state, int client_page,
+
+ #define sdp_read(s, r) adv748x_read(s, ADV748X_PAGE_SDP, r)
+ #define sdp_write(s, r, v) adv748x_write(s, ADV748X_PAGE_SDP, r, v)
+-#define sdp_clrset(s, r, m, v) sdp_write(s, r, (sdp_read(s, r) & ~m) | v)
++#define sdp_clrset(s, r, m, v) sdp_write(s, r, (sdp_read(s, r) & ~(m)) | (v))
+
+ #define cp_read(s, r) adv748x_read(s, ADV748X_PAGE_CP, r)
+ #define cp_write(s, r, v) adv748x_write(s, ADV748X_PAGE_CP, r, v)
+-#define cp_clrset(s, r, m, v) cp_write(s, r, (cp_read(s, r) & ~m) | v)
++#define cp_clrset(s, r, m, v) cp_write(s, r, (cp_read(s, r) & ~(m)) | (v))
+
+ #define tx_read(t, r) adv748x_read(t->state, t->page, r)
+ #define tx_write(t, r, v) adv748x_write(t->state, t->page, r, v)
+diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
+index 420900852166..c366503c466d 100644
+--- a/drivers/mfd/Kconfig
++++ b/drivers/mfd/Kconfig
+@@ -758,6 +758,7 @@ config MFD_MAX77650
+ depends on OF || COMPILE_TEST
+ select MFD_CORE
+ select REGMAP_I2C
++ select REGMAP_IRQ
+ help
+ Say Y here to add support for Maxim Semiconductor MAX77650 and
+ MAX77651 Power Management ICs. This is the core multifunction
+diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c
+index 85640ee11c86..d5326d19b136 100644
+--- a/drivers/mtd/nand/onenand/onenand_base.c
++++ b/drivers/mtd/nand/onenand/onenand_base.c
+@@ -1248,44 +1248,44 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
+
+ stats = mtd->ecc_stats;
+
+- /* Read-while-load method */
++ /* Read-while-load method */
+
+- /* Do first load to bufferRAM */
+- if (read < len) {
+- if (!onenand_check_bufferram(mtd, from)) {
++ /* Do first load to bufferRAM */
++ if (read < len) {
++ if (!onenand_check_bufferram(mtd, from)) {
+ this->command(mtd, ONENAND_CMD_READ, from, writesize);
+- ret = this->wait(mtd, FL_READING);
+- onenand_update_bufferram(mtd, from, !ret);
++ ret = this->wait(mtd, FL_READING);
++ onenand_update_bufferram(mtd, from, !ret);
+ if (mtd_is_eccerr(ret))
+ ret = 0;
+- }
+- }
++ }
++ }
+
+ thislen = min_t(int, writesize, len - read);
+ column = from & (writesize - 1);
+ if (column + thislen > writesize)
+ thislen = writesize - column;
+
+- while (!ret) {
+- /* If there is more to load then start next load */
+- from += thislen;
+- if (read + thislen < len) {
++ while (!ret) {
++ /* If there is more to load then start next load */
++ from += thislen;
++ if (read + thislen < len) {
+ this->command(mtd, ONENAND_CMD_READ, from, writesize);
+- /*
+- * Chip boundary handling in DDP
+- * Now we issued chip 1 read and pointed chip 1
++ /*
++ * Chip boundary handling in DDP
++ * Now we issued chip 1 read and pointed chip 1
+ * bufferram so we have to point chip 0 bufferram.
+- */
+- if (ONENAND_IS_DDP(this) &&
+- unlikely(from == (this->chipsize >> 1))) {
+- this->write_word(ONENAND_DDP_CHIP0, this->base + ONENAND_REG_START_ADDRESS2);
+- boundary = 1;
+- } else
+- boundary = 0;
+- ONENAND_SET_PREV_BUFFERRAM(this);
+- }
+- /* While load is going, read from last bufferRAM */
+- this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
++ */
++ if (ONENAND_IS_DDP(this) &&
++ unlikely(from == (this->chipsize >> 1))) {
++ this->write_word(ONENAND_DDP_CHIP0, this->base + ONENAND_REG_START_ADDRESS2);
++ boundary = 1;
++ } else
++ boundary = 0;
++ ONENAND_SET_PREV_BUFFERRAM(this);
++ }
++ /* While load is going, read from last bufferRAM */
++ this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
+
+ /* Read oob area if needed */
+ if (oobbuf) {
+@@ -1301,24 +1301,24 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
+ oobcolumn = 0;
+ }
+
+- /* See if we are done */
+- read += thislen;
+- if (read == len)
+- break;
+- /* Set up for next read from bufferRAM */
+- if (unlikely(boundary))
+- this->write_word(ONENAND_DDP_CHIP1, this->base + ONENAND_REG_START_ADDRESS2);
+- ONENAND_SET_NEXT_BUFFERRAM(this);
+- buf += thislen;
++ /* See if we are done */
++ read += thislen;
++ if (read == len)
++ break;
++ /* Set up for next read from bufferRAM */
++ if (unlikely(boundary))
++ this->write_word(ONENAND_DDP_CHIP1, this->base + ONENAND_REG_START_ADDRESS2);
++ ONENAND_SET_NEXT_BUFFERRAM(this);
++ buf += thislen;
+ thislen = min_t(int, writesize, len - read);
+- column = 0;
+- cond_resched();
+- /* Now wait for load */
+- ret = this->wait(mtd, FL_READING);
+- onenand_update_bufferram(mtd, from, !ret);
++ column = 0;
++ cond_resched();
++ /* Now wait for load */
++ ret = this->wait(mtd, FL_READING);
++ onenand_update_bufferram(mtd, from, !ret);
+ if (mtd_is_eccerr(ret))
+ ret = 0;
+- }
++ }
+
+ /*
+ * Return success, if no ECC failures, else -EBADMSG
+diff --git a/drivers/mtd/parsers/sharpslpart.c b/drivers/mtd/parsers/sharpslpart.c
+index e5ea6127ab5a..671a61845bd5 100644
+--- a/drivers/mtd/parsers/sharpslpart.c
++++ b/drivers/mtd/parsers/sharpslpart.c
+@@ -165,10 +165,10 @@ static int sharpsl_nand_get_logical_num(u8 *oob)
+
+ static int sharpsl_nand_init_ftl(struct mtd_info *mtd, struct sharpsl_ftl *ftl)
+ {
+- unsigned int block_num, log_num, phymax;
++ unsigned int block_num, phymax;
++ int i, ret, log_num;
+ loff_t block_adr;
+ u8 *oob;
+- int i, ret;
+
+ oob = kzalloc(mtd->oobsize, GFP_KERNEL);
+ if (!oob)
+diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c
+index 6aeed0c600f8..7971dc4f54f1 100644
+--- a/drivers/net/netdevsim/bus.c
++++ b/drivers/net/netdevsim/bus.c
+@@ -17,6 +17,7 @@
+ static DEFINE_IDA(nsim_bus_dev_ids);
+ static LIST_HEAD(nsim_bus_dev_list);
+ static DEFINE_MUTEX(nsim_bus_dev_list_lock);
++static bool nsim_bus_enable;
+
+ static struct nsim_bus_dev *to_nsim_bus_dev(struct device *dev)
+ {
+@@ -28,7 +29,7 @@ static int nsim_bus_dev_vfs_enable(struct nsim_bus_dev *nsim_bus_dev,
+ {
+ nsim_bus_dev->vfconfigs = kcalloc(num_vfs,
+ sizeof(struct nsim_vf_config),
+- GFP_KERNEL);
++ GFP_KERNEL | __GFP_NOWARN);
+ if (!nsim_bus_dev->vfconfigs)
+ return -ENOMEM;
+ nsim_bus_dev->num_vfs = num_vfs;
+@@ -96,13 +97,25 @@ new_port_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+ {
+ struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
++ struct nsim_dev *nsim_dev = dev_get_drvdata(dev);
++ struct devlink *devlink;
+ unsigned int port_index;
+ int ret;
+
++	/* Prevent use of nsim_bus_dev before initialization. */
++ if (!smp_load_acquire(&nsim_bus_dev->init))
++ return -EBUSY;
+ ret = kstrtouint(buf, 0, &port_index);
+ if (ret)
+ return ret;
++
++ devlink = priv_to_devlink(nsim_dev);
++
++ mutex_lock(&nsim_bus_dev->nsim_bus_reload_lock);
++ devlink_reload_disable(devlink);
+ ret = nsim_dev_port_add(nsim_bus_dev, port_index);
++ devlink_reload_enable(devlink);
++ mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
+ return ret ? ret : count;
+ }
+
+@@ -113,13 +126,25 @@ del_port_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+ {
+ struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
++ struct nsim_dev *nsim_dev = dev_get_drvdata(dev);
++ struct devlink *devlink;
+ unsigned int port_index;
+ int ret;
+
++	/* Prevent use of nsim_bus_dev before initialization. */
++ if (!smp_load_acquire(&nsim_bus_dev->init))
++ return -EBUSY;
+ ret = kstrtouint(buf, 0, &port_index);
+ if (ret)
+ return ret;
++
++ devlink = priv_to_devlink(nsim_dev);
++
++ mutex_lock(&nsim_bus_dev->nsim_bus_reload_lock);
++ devlink_reload_disable(devlink);
+ ret = nsim_dev_port_del(nsim_bus_dev, port_index);
++ devlink_reload_enable(devlink);
++ mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
+ return ret ? ret : count;
+ }
+
+@@ -179,15 +204,30 @@ new_device_store(struct bus_type *bus, const char *buf, size_t count)
+ pr_err("Format for adding new device is \"id port_count\" (uint uint).\n");
+ return -EINVAL;
+ }
+- nsim_bus_dev = nsim_bus_dev_new(id, port_count);
+- if (IS_ERR(nsim_bus_dev))
+- return PTR_ERR(nsim_bus_dev);
+
+ mutex_lock(&nsim_bus_dev_list_lock);
++	/* Prevent use of resources before initialization. */
++ if (!smp_load_acquire(&nsim_bus_enable)) {
++ err = -EBUSY;
++ goto err;
++ }
++
++ nsim_bus_dev = nsim_bus_dev_new(id, port_count);
++ if (IS_ERR(nsim_bus_dev)) {
++ err = PTR_ERR(nsim_bus_dev);
++ goto err;
++ }
++
++ /* Allow using nsim_bus_dev */
++ smp_store_release(&nsim_bus_dev->init, true);
++
+ list_add_tail(&nsim_bus_dev->list, &nsim_bus_dev_list);
+ mutex_unlock(&nsim_bus_dev_list_lock);
+
+ return count;
++err:
++ mutex_unlock(&nsim_bus_dev_list_lock);
++ return err;
+ }
+ static BUS_ATTR_WO(new_device);
+
+@@ -215,6 +255,11 @@ del_device_store(struct bus_type *bus, const char *buf, size_t count)
+
+ err = -ENOENT;
+ mutex_lock(&nsim_bus_dev_list_lock);
++	/* Prevent use of resources before initialization. */
++ if (!smp_load_acquire(&nsim_bus_enable)) {
++ mutex_unlock(&nsim_bus_dev_list_lock);
++ return -EBUSY;
++ }
+ list_for_each_entry_safe(nsim_bus_dev, tmp, &nsim_bus_dev_list, list) {
+ if (nsim_bus_dev->dev.id != id)
+ continue;
+@@ -284,6 +329,9 @@ nsim_bus_dev_new(unsigned int id, unsigned int port_count)
+ nsim_bus_dev->dev.type = &nsim_bus_dev_type;
+ nsim_bus_dev->port_count = port_count;
+ nsim_bus_dev->initial_net = current->nsproxy->net_ns;
++ mutex_init(&nsim_bus_dev->nsim_bus_reload_lock);
++ /* Disallow using nsim_bus_dev */
++ smp_store_release(&nsim_bus_dev->init, false);
+
+ err = device_register(&nsim_bus_dev->dev);
+ if (err)
+@@ -299,6 +347,8 @@ err_nsim_bus_dev_free:
+
+ static void nsim_bus_dev_del(struct nsim_bus_dev *nsim_bus_dev)
+ {
++ /* Disallow using nsim_bus_dev */
++ smp_store_release(&nsim_bus_dev->init, false);
+ device_unregister(&nsim_bus_dev->dev);
+ ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id);
+ kfree(nsim_bus_dev);
+@@ -320,6 +370,8 @@ int nsim_bus_init(void)
+ err = driver_register(&nsim_driver);
+ if (err)
+ goto err_bus_unregister;
++ /* Allow using resources */
++ smp_store_release(&nsim_bus_enable, true);
+ return 0;
+
+ err_bus_unregister:
+@@ -331,12 +383,16 @@ void nsim_bus_exit(void)
+ {
+ struct nsim_bus_dev *nsim_bus_dev, *tmp;
+
++ /* Disallow using resources */
++ smp_store_release(&nsim_bus_enable, false);
++
+ mutex_lock(&nsim_bus_dev_list_lock);
+ list_for_each_entry_safe(nsim_bus_dev, tmp, &nsim_bus_dev_list, list) {
+ list_del(&nsim_bus_dev->list);
+ nsim_bus_dev_del(nsim_bus_dev);
+ }
+ mutex_unlock(&nsim_bus_dev_list_lock);
++
+ driver_unregister(&nsim_driver);
+ bus_unregister(&nsim_bus);
+ }
+diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
+index 2a945b3c7c76..54bc089550b3 100644
+--- a/drivers/net/netdevsim/dev.c
++++ b/drivers/net/netdevsim/dev.c
+@@ -88,8 +88,11 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
+ &nsim_dev->max_macs);
+ debugfs_create_bool("test1", 0600, nsim_dev->ddir,
+ &nsim_dev->test1);
+- debugfs_create_file("take_snapshot", 0200, nsim_dev->ddir, nsim_dev,
+- &nsim_dev_take_snapshot_fops);
++ nsim_dev->take_snapshot = debugfs_create_file("take_snapshot",
++ 0200,
++ nsim_dev->ddir,
++ nsim_dev,
++ &nsim_dev_take_snapshot_fops);
+ debugfs_create_bool("dont_allow_reload", 0600, nsim_dev->ddir,
+ &nsim_dev->dont_allow_reload);
+ debugfs_create_bool("fail_reload", 0600, nsim_dev->ddir,
+@@ -740,6 +743,11 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
+ if (err)
+ goto err_health_exit;
+
++ nsim_dev->take_snapshot = debugfs_create_file("take_snapshot",
++ 0200,
++ nsim_dev->ddir,
++ nsim_dev,
++ &nsim_dev_take_snapshot_fops);
+ return 0;
+
+ err_health_exit:
+@@ -853,6 +861,7 @@ static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev)
+
+ if (devlink_is_reload_failed(devlink))
+ return;
++ debugfs_remove(nsim_dev->take_snapshot);
+ nsim_dev_port_del_all(nsim_dev);
+ nsim_dev_health_exit(nsim_dev);
+ nsim_dev_traps_exit(devlink);
+diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c
+index 9aa637d162eb..c06e0f8fbc10 100644
+--- a/drivers/net/netdevsim/health.c
++++ b/drivers/net/netdevsim/health.c
+@@ -82,7 +82,7 @@ static int nsim_dev_dummy_fmsg_put(struct devlink_fmsg *fmsg, u32 binary_len)
+ if (err)
+ return err;
+
+- binary = kmalloc(binary_len, GFP_KERNEL);
++ binary = kmalloc(binary_len, GFP_KERNEL | __GFP_NOWARN);
+ if (!binary)
+ return -ENOMEM;
+ get_random_bytes(binary, binary_len);
+diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
+index 94df795ef4d3..2eb7b0dc1594 100644
+--- a/drivers/net/netdevsim/netdevsim.h
++++ b/drivers/net/netdevsim/netdevsim.h
+@@ -160,6 +160,7 @@ struct nsim_dev {
+ struct nsim_trap_data *trap_data;
+ struct dentry *ddir;
+ struct dentry *ports_ddir;
++ struct dentry *take_snapshot;
+ struct bpf_offload_dev *bpf_dev;
+ bool bpf_bind_accept;
+ u32 bpf_bind_verifier_delay;
+@@ -240,6 +241,9 @@ struct nsim_bus_dev {
+ */
+ unsigned int num_vfs;
+ struct nsim_vf_config *vfconfigs;
++ /* Lock for devlink->reload_enabled in netdevsim module */
++ struct mutex nsim_bus_reload_lock;
++ bool init;
+ };
+
+ int nsim_bus_init(void);
+diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
+index bb44f5a0941b..4822a65f6f3c 100644
+--- a/drivers/net/wireless/ath/ath10k/pci.c
++++ b/drivers/net/wireless/ath/ath10k/pci.c
+@@ -1604,11 +1604,22 @@ static int ath10k_pci_dump_memory_reg(struct ath10k *ar,
+ {
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ u32 i;
++ int ret;
++
++ mutex_lock(&ar->conf_mutex);
++ if (ar->state != ATH10K_STATE_ON) {
++ ath10k_warn(ar, "Skipping pci_dump_memory_reg invalid state\n");
++ ret = -EIO;
++ goto done;
++ }
+
+ for (i = 0; i < region->len; i += 4)
+ *(u32 *)(buf + i) = ioread32(ar_pci->mem + region->start + i);
+
+- return region->len;
++ ret = region->len;
++done:
++ mutex_unlock(&ar->conf_mutex);
++ return ret;
+ }
+
+ /* if an error happened returns < 0, otherwise the length */
+@@ -1704,7 +1715,11 @@ static void ath10k_pci_dump_memory(struct ath10k *ar,
+ count = ath10k_pci_dump_memory_sram(ar, current_region, buf);
+ break;
+ case ATH10K_MEM_REGION_TYPE_IOREG:
+- count = ath10k_pci_dump_memory_reg(ar, current_region, buf);
++ ret = ath10k_pci_dump_memory_reg(ar, current_region, buf);
++ if (ret < 0)
++ break;
++
++ count = ret;
+ break;
+ default:
+ ret = ath10k_pci_dump_memory_generic(ar, current_region, buf);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+index 9f4b117db9d7..d47f76890cf9 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+@@ -8,6 +8,7 @@
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2019 Intel Corporation
++ * Copyright (C) 2020 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+@@ -30,6 +31,7 @@
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2019 Intel Corporation
++ * Copyright (C) 2020 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+@@ -389,6 +391,8 @@ void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req)
+ if (req != mvm->ftm_initiator.req)
+ return;
+
++ iwl_mvm_ftm_reset(mvm);
++
+ if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RANGE_ABORT_CMD,
+ LOCATION_GROUP, 0),
+ 0, sizeof(cmd), &cmd))
+@@ -502,7 +506,6 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!mvm->ftm_initiator.req) {
+- IWL_ERR(mvm, "Got FTM response but have no request?\n");
+ return;
+ }
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index 6717f25c46b1..efdf15f57f16 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -3293,7 +3293,7 @@ static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
+ iwl_mvm_schedule_session_protection(mvm, vif, 900,
+- min_duration);
++ min_duration, false);
+ else
+ iwl_mvm_protect_session(mvm, vif, duration,
+ min_duration, 500, false);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
+index 1851719e9f4b..d781777b6b96 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
+@@ -205,9 +205,15 @@ void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;
+
+- mutex_lock(&mvm->mutex);
+ /* Protect the session to hear the TDLS setup response on the channel */
+- iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true);
++ mutex_lock(&mvm->mutex);
++ if (fw_has_capa(&mvm->fw->ucode_capa,
++ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
++ iwl_mvm_schedule_session_protection(mvm, vif, duration,
++ duration, true);
++ else
++ iwl_mvm_protect_session(mvm, vif, duration,
++ duration, 100, true);
+ mutex_unlock(&mvm->mutex);
+ }
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+index 51b138673ddb..c0b420fe5e48 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+@@ -1056,13 +1056,42 @@ int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
+ return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
+ }
+
++static bool iwl_mvm_session_prot_notif(struct iwl_notif_wait_data *notif_wait,
++ struct iwl_rx_packet *pkt, void *data)
++{
++ struct iwl_mvm *mvm =
++ container_of(notif_wait, struct iwl_mvm, notif_wait);
++ struct iwl_mvm_session_prot_notif *resp;
++ int resp_len = iwl_rx_packet_payload_len(pkt);
++
++ if (WARN_ON(pkt->hdr.cmd != SESSION_PROTECTION_NOTIF ||
++ pkt->hdr.group_id != MAC_CONF_GROUP))
++ return true;
++
++ if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
++ IWL_ERR(mvm, "Invalid SESSION_PROTECTION_NOTIF response\n");
++ return true;
++ }
++
++ resp = (void *)pkt->data;
++
++ if (!resp->status)
++ IWL_ERR(mvm,
++ "TIME_EVENT_NOTIFICATION received but not executed\n");
++
++ return true;
++}
++
+ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+- u32 duration, u32 min_duration)
++ u32 duration, u32 min_duration,
++ bool wait_for_notif)
+ {
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
+-
++ const u16 notif[] = { iwl_cmd_id(SESSION_PROTECTION_NOTIF,
++ MAC_CONF_GROUP, 0) };
++ struct iwl_notification_wait wait_notif;
+ struct iwl_mvm_session_prot_cmd cmd = {
+ .id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+@@ -1071,7 +1100,6 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
+ .conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
+ .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
+ };
+- int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+@@ -1092,14 +1120,35 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
+ IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n",
+ le32_to_cpu(cmd.duration_tu));
+
+- ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
+- MAC_CONF_GROUP, 0),
+- 0, sizeof(cmd), &cmd);
+- if (ret) {
++ if (!wait_for_notif) {
++ if (iwl_mvm_send_cmd_pdu(mvm,
++ iwl_cmd_id(SESSION_PROTECTION_CMD,
++ MAC_CONF_GROUP, 0),
++ 0, sizeof(cmd), &cmd)) {
++ IWL_ERR(mvm,
++ "Couldn't send the SESSION_PROTECTION_CMD\n");
++ spin_lock_bh(&mvm->time_event_lock);
++ iwl_mvm_te_clear_data(mvm, te_data);
++ spin_unlock_bh(&mvm->time_event_lock);
++ }
++
++ return;
++ }
++
++ iwl_init_notification_wait(&mvm->notif_wait, &wait_notif,
++ notif, ARRAY_SIZE(notif),
++ iwl_mvm_session_prot_notif, NULL);
++
++ if (iwl_mvm_send_cmd_pdu(mvm,
++ iwl_cmd_id(SESSION_PROTECTION_CMD,
++ MAC_CONF_GROUP, 0),
++ 0, sizeof(cmd), &cmd)) {
+ IWL_ERR(mvm,
+- "Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
+- spin_lock_bh(&mvm->time_event_lock);
+- iwl_mvm_te_clear_data(mvm, te_data);
+- spin_unlock_bh(&mvm->time_event_lock);
++ "Couldn't send the SESSION_PROTECTION_CMD\n");
++ iwl_remove_notification(&mvm->notif_wait, &wait_notif);
++ } else if (iwl_wait_notification(&mvm->notif_wait, &wait_notif,
++ TU_TO_JIFFIES(100))) {
++ IWL_ERR(mvm,
++ "Failed to protect session until session protection\n");
+ }
+ }
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
+index df6832b79666..3186d7e40567 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
+@@ -250,10 +250,12 @@ iwl_mvm_te_scheduled(struct iwl_mvm_time_event_data *te_data)
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the protection issued
+ * @duration: the duration of the protection
++ * @wait_for_notif: if true, will block until the start of the protection
+ */
+ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+- u32 duration, u32 min_duration);
++ u32 duration, u32 min_duration,
++ bool wait_for_notif);
+
+ /**
+ * iwl_mvm_rx_session_protect_notif - handles %SESSION_PROTECTION_NOTIF
+diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
+index c9401c121a14..4e3de684928b 100644
+--- a/drivers/net/wireless/marvell/libertas/cfg.c
++++ b/drivers/net/wireless/marvell/libertas/cfg.c
+@@ -1785,6 +1785,8 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
+ rates_max = rates_eid[1];
+ if (rates_max > MAX_RATES) {
+ lbs_deb_join("invalid rates");
++ rcu_read_unlock();
++ ret = -EINVAL;
+ goto out;
+ }
+ rates = cmd.bss.rates;
+diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
+index 98f942b797f7..a7968a84aaf8 100644
+--- a/drivers/net/wireless/marvell/mwifiex/scan.c
++++ b/drivers/net/wireless/marvell/mwifiex/scan.c
+@@ -2884,6 +2884,13 @@ mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv,
+ vs_param_set->header.len =
+ cpu_to_le16((((u16) priv->vs_ie[id].ie[1])
+ & 0x00FF) + 2);
++ if (le16_to_cpu(vs_param_set->header.len) >
++ MWIFIEX_MAX_VSIE_LEN) {
++ mwifiex_dbg(priv->adapter, ERROR,
++ "Invalid param length!\n");
++ break;
++ }
++
+ memcpy(vs_param_set->ie, priv->vs_ie[id].ie,
+ le16_to_cpu(vs_param_set->header.len));
+ *buffer += le16_to_cpu(vs_param_set->header.len) +
+diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
+index 41f0231376c0..132f9e8ed68c 100644
+--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
++++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
+@@ -970,6 +970,10 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
+ "WMM Parameter Set Count: %d\n",
+ wmm_param_ie->qos_info_bitmap & mask);
+
++ if (wmm_param_ie->vend_hdr.len + 2 >
++ sizeof(struct ieee_types_wmm_parameter))
++ break;
++
+ memcpy((u8 *) &priv->curr_bss_params.bss_descriptor.
+ wmm_ie, wmm_param_ie,
+ wmm_param_ie->vend_hdr.len + 2);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
+index eccad4987ac8..17e277bf39e0 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
+@@ -92,8 +92,9 @@ static int mt7615_check_eeprom(struct mt76_dev *dev)
+
+ static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev)
+ {
+- u8 val, *eeprom = dev->mt76.eeprom.data;
++ u8 *eeprom = dev->mt76.eeprom.data;
+ u8 tx_mask, rx_mask, max_nss;
++ u32 val;
+
+ val = FIELD_GET(MT_EE_NIC_WIFI_CONF_BAND_SEL,
+ eeprom[MT_EE_WIFI_CONF]);
+diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
+index 090b632965e2..ac93f5a0398e 100644
+--- a/drivers/pci/controller/pci-tegra.c
++++ b/drivers/pci/controller/pci-tegra.c
+@@ -2499,7 +2499,6 @@ static const struct tegra_pcie_soc tegra20_pcie = {
+ .num_ports = 2,
+ .ports = tegra20_pcie_ports,
+ .msi_base_shift = 0,
+- .afi_pex2_ctrl = 0x128,
+ .pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
+ .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
+ .pads_refclk_cfg0 = 0xfa5cfa5c,
+@@ -2528,6 +2527,7 @@ static const struct tegra_pcie_soc tegra30_pcie = {
+ .num_ports = 3,
+ .ports = tegra30_pcie_ports,
+ .msi_base_shift = 8,
++ .afi_pex2_ctrl = 0x128,
+ .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
+ .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
+ .pads_refclk_cfg0 = 0xfa5cfa5c,
+diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
+index 1e88fd427757..4d1f392b05f9 100644
+--- a/drivers/pci/iov.c
++++ b/drivers/pci/iov.c
+@@ -186,10 +186,10 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
+ sprintf(buf, "virtfn%u", id);
+ rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
+ if (rc)
+- goto failed2;
++ goto failed1;
+ rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
+ if (rc)
+- goto failed3;
++ goto failed2;
+
+ kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
+
+@@ -197,11 +197,10 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
+
+ return 0;
+
+-failed3:
+- sysfs_remove_link(&dev->dev.kobj, buf);
+ failed2:
+- pci_stop_and_remove_bus_device(virtfn);
++ sysfs_remove_link(&dev->dev.kobj, buf);
+ failed1:
++ pci_stop_and_remove_bus_device(virtfn);
+ pci_dev_put(dev);
+ failed0:
+ virtfn_remove_bus(dev->bus, bus);
+diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
+index 1ca86f2e0166..4a818b07a1af 100644
+--- a/drivers/pci/pcie/aer.c
++++ b/drivers/pci/pcie/aer.c
+@@ -1445,6 +1445,7 @@ static int aer_probe(struct pcie_device *dev)
+ return -ENOMEM;
+
+ rpc->rpd = port;
++ INIT_KFIFO(rpc->aer_fifo);
+ set_service_data(dev, rpc);
+
+ status = devm_request_threaded_irq(device, dev->irq, aer_irq, aer_isr,
+diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
+index f279826204eb..591161ce0f51 100644
+--- a/drivers/pci/setup-bus.c
++++ b/drivers/pci/setup-bus.c
+@@ -1803,12 +1803,18 @@ again:
+ /* Restore size and flags */
+ list_for_each_entry(fail_res, &fail_head, list) {
+ struct resource *res = fail_res->res;
++ int idx;
+
+ res->start = fail_res->start;
+ res->end = fail_res->end;
+ res->flags = fail_res->flags;
+- if (fail_res->dev->subordinate)
+- res->flags = 0;
++
++ if (pci_is_bridge(fail_res->dev)) {
++ idx = res - &fail_res->dev->resource[0];
++ if (idx >= PCI_BRIDGE_RESOURCES &&
++ idx <= PCI_BRIDGE_RESOURCE_END)
++ res->flags = 0;
++ }
+ }
+ free_list(&fail_head);
+
+@@ -2055,12 +2061,18 @@ again:
+ /* Restore size and flags */
+ list_for_each_entry(fail_res, &fail_head, list) {
+ struct resource *res = fail_res->res;
++ int idx;
+
+ res->start = fail_res->start;
+ res->end = fail_res->end;
+ res->flags = fail_res->flags;
+- if (fail_res->dev->subordinate)
+- res->flags = 0;
++
++ if (pci_is_bridge(fail_res->dev)) {
++ idx = res - &fail_res->dev->resource[0];
++ if (idx >= PCI_BRIDGE_RESOURCES &&
++ idx <= PCI_BRIDGE_RESOURCE_END)
++ res->flags = 0;
++ }
+ }
+ free_list(&fail_head);
+
+diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
+index 88091bbfe77f..9c3ad09d3022 100644
+--- a/drivers/pci/switch/switchtec.c
++++ b/drivers/pci/switch/switchtec.c
+@@ -1276,7 +1276,7 @@ static int switchtec_init_isr(struct switchtec_dev *stdev)
+ if (nvecs < 0)
+ return nvecs;
+
+- event_irq = ioread32(&stdev->mmio_part_cfg->vep_vector_number);
++ event_irq = ioread16(&stdev->mmio_part_cfg->vep_vector_number);
+ if (event_irq < 0 || event_irq >= nvecs)
+ return -EFAULT;
+
+@@ -1349,7 +1349,7 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,
+ if (rc)
+ return rc;
+
+- rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
++ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc)
+ return rc;
+
+diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
+index 55141d5de29e..72ffd19448e5 100644
+--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
++++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
+@@ -107,6 +107,7 @@ struct byt_gpio_pin_context {
+
+ struct byt_gpio {
+ struct gpio_chip chip;
++ struct irq_chip irqchip;
+ struct platform_device *pdev;
+ struct pinctrl_dev *pctl_dev;
+ struct pinctrl_desc pctl_desc;
+@@ -1395,15 +1396,6 @@ static int byt_irq_type(struct irq_data *d, unsigned int type)
+ return 0;
+ }
+
+-static struct irq_chip byt_irqchip = {
+- .name = "BYT-GPIO",
+- .irq_ack = byt_irq_ack,
+- .irq_mask = byt_irq_mask,
+- .irq_unmask = byt_irq_unmask,
+- .irq_set_type = byt_irq_type,
+- .flags = IRQCHIP_SKIP_SET_WAKE,
+-};
+-
+ static void byt_gpio_irq_handler(struct irq_desc *desc)
+ {
+ struct irq_data *data = irq_desc_get_irq_data(desc);
+@@ -1551,8 +1543,15 @@ static int byt_gpio_probe(struct byt_gpio *vg)
+ if (irq_rc && irq_rc->start) {
+ struct gpio_irq_chip *girq;
+
++ vg->irqchip.name = "BYT-GPIO",
++ vg->irqchip.irq_ack = byt_irq_ack,
++ vg->irqchip.irq_mask = byt_irq_mask,
++ vg->irqchip.irq_unmask = byt_irq_unmask,
++ vg->irqchip.irq_set_type = byt_irq_type,
++ vg->irqchip.flags = IRQCHIP_SKIP_SET_WAKE,
++
+ girq = &gc->irq;
+- girq->chip = &byt_irqchip;
++ girq->chip = &vg->irqchip;
+ girq->init_hw = byt_gpio_irq_init_hw;
+ girq->parent_handler = byt_gpio_irq_handler;
+ girq->num_parents = 1;
+diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
+index 5d6f9f61ce02..9a8daa256a32 100644
+--- a/drivers/pinctrl/qcom/pinctrl-msm.c
++++ b/drivers/pinctrl/qcom/pinctrl-msm.c
+@@ -960,7 +960,6 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
+ {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+- unsigned long flags;
+
+ /*
+ * While they may not wake up when the TLMM is powered off,
+@@ -971,12 +970,8 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
+ if (d->parent_data)
+ irq_chip_set_wake_parent(d, on);
+
+- raw_spin_lock_irqsave(&pctrl->lock, flags);
+-
+ irq_set_irq_wake(pctrl->irq, on);
+
+- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+-
+ return 0;
+ }
+
+diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
+index 24866a5958ae..a9875038ed9b 100644
+--- a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
++++ b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
+@@ -2305,7 +2305,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
+ FN_ATAG0_A, 0, FN_REMOCON_B, 0,
+ /* IP0_11_8 [4] */
+ FN_SD1_DAT2_A, FN_MMC_D2, 0, FN_BS,
+- FN_ATADIR0_A, 0, FN_SDSELF_B, 0,
++ FN_ATADIR0_A, 0, FN_SDSELF_A, 0,
+ FN_PWM4_B, 0, 0, 0,
+ 0, 0, 0, 0,
+ /* IP0_7_5 [3] */
+@@ -2349,7 +2349,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
+ FN_TS_SDAT0_A, 0, 0, 0,
+ 0, 0, 0, 0,
+ /* IP1_10_8 [3] */
+- FN_SD1_CLK_B, FN_MMC_D6, 0, FN_A24,
++ FN_SD1_CD_A, FN_MMC_D6, 0, FN_A24,
+ FN_DREQ1_A, 0, FN_HRX0_B, FN_TS_SPSYNC0_A,
+ /* IP1_7_5 [3] */
+ FN_A23, FN_HTX0_B, FN_TX2_B, FN_DACK2_A,
+diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
+index 8bdf33c807f6..6616f5210b9d 100644
+--- a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
++++ b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
+@@ -5998,7 +5998,7 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
+ { PIN_DU_DOTCLKIN1, 0, 2 }, /* DU_DOTCLKIN1 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL12", 0xe6060330) {
+- { PIN_DU_DOTCLKIN3, 28, 2 }, /* DU_DOTCLKIN3 */
++ { PIN_DU_DOTCLKIN3, 24, 2 }, /* DU_DOTCLKIN3 */
+ { PIN_FSCLKST, 20, 2 }, /* FSCLKST */
+ { PIN_TMS, 4, 2 }, /* TMS */
+ } },
+@@ -6254,8 +6254,8 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
+ [31] = PIN_DU_DOTCLKIN1, /* DU_DOTCLKIN1 */
+ } },
+ { PINMUX_BIAS_REG("PUEN3", 0xe606040c, "PUD3", 0xe606044c) {
+- [ 0] = PIN_DU_DOTCLKIN3, /* DU_DOTCLKIN3 */
+- [ 1] = SH_PFC_PIN_NONE,
++ [ 0] = SH_PFC_PIN_NONE,
++ [ 1] = PIN_DU_DOTCLKIN3, /* DU_DOTCLKIN3 */
+ [ 2] = PIN_FSCLKST, /* FSCLKST */
+ [ 3] = PIN_EXTALR, /* EXTALR*/
+ [ 4] = PIN_TRST_N, /* TRST# */
+diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
+index 292bace83f1e..6f436836fe50 100644
+--- a/drivers/platform/x86/intel_mid_powerbtn.c
++++ b/drivers/platform/x86/intel_mid_powerbtn.c
+@@ -146,9 +146,10 @@ static int mid_pb_probe(struct platform_device *pdev)
+
+ input_set_capability(input, EV_KEY, KEY_POWER);
+
+- ddata = (struct mid_pb_ddata *)id->driver_data;
++ ddata = devm_kmemdup(&pdev->dev, (void *)id->driver_data,
++ sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+- return -ENODATA;
++ return -ENOMEM;
+
+ ddata->dev = &pdev->dev;
+ ddata->irq = irq;
+diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
+index 033303708c8b..cb28bbdc9e17 100644
+--- a/drivers/rtc/rtc-cmos.c
++++ b/drivers/rtc/rtc-cmos.c
+@@ -850,7 +850,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
+ rtc_cmos_int_handler = cmos_interrupt;
+
+ retval = request_irq(rtc_irq, rtc_cmos_int_handler,
+- IRQF_SHARED, dev_name(&cmos_rtc.rtc->dev),
++ 0, dev_name(&cmos_rtc.rtc->dev),
+ cmos_rtc.rtc);
+ if (retval < 0) {
+ dev_dbg(dev, "IRQ %d is already in use\n", rtc_irq);
+diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
+index 443f6d05ce29..fb6d7967ec00 100644
+--- a/drivers/rtc/rtc-hym8563.c
++++ b/drivers/rtc/rtc-hym8563.c
+@@ -97,7 +97,7 @@ static int hym8563_rtc_read_time(struct device *dev, struct rtc_time *tm)
+
+ if (!hym8563->valid) {
+ dev_warn(&client->dev, "no valid clock/calendar values available\n");
+- return -EPERM;
++ return -EINVAL;
+ }
+
+ ret = i2c_smbus_read_i2c_block_data(client, HYM8563_SEC, 7, buf);
+diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
+index 9135e2101752..cda238dfe69b 100644
+--- a/drivers/rtc/rtc-mt6397.c
++++ b/drivers/rtc/rtc-mt6397.c
+@@ -297,15 +297,7 @@ static int mtk_rtc_probe(struct platform_device *pdev)
+
+ rtc->rtc_dev->ops = &mtk_rtc_ops;
+
+- ret = rtc_register_device(rtc->rtc_dev);
+- if (ret)
+- goto out_free_irq;
+-
+- return 0;
+-
+-out_free_irq:
+- free_irq(rtc->irq, rtc);
+- return ret;
++ return rtc_register_device(rtc->rtc_dev);
+ }
+
+ #ifdef CONFIG_PM_SLEEP
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index 03173f06ab96..3fbf9ea16c64 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -7030,7 +7030,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
+ ufshcd_init_icc_levels(hba);
+
+ /* Add required well known logical units to scsi mid layer */
+- if (ufshcd_scsi_add_wlus(hba))
++ ret = ufshcd_scsi_add_wlus(hba);
++ if (ret)
+ goto out;
+
+ /* Initialize devfreq after UFS device is detected */
+diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
+index 5741ec3fa814..51850cc68b70 100644
+--- a/drivers/soc/qcom/rpmhpd.c
++++ b/drivers/soc/qcom/rpmhpd.c
+@@ -93,6 +93,7 @@ static struct rpmhpd sdm845_mx = {
+
+ static struct rpmhpd sdm845_mx_ao = {
+ .pd = { .name = "mx_ao", },
++ .active_only = true,
+ .peer = &sdm845_mx,
+ .res_name = "mx.lvl",
+ };
+@@ -107,6 +108,7 @@ static struct rpmhpd sdm845_cx = {
+
+ static struct rpmhpd sdm845_cx_ao = {
+ .pd = { .name = "cx_ao", },
++ .active_only = true,
+ .peer = &sdm845_cx,
+ .parent = &sdm845_mx_ao.pd,
+ .res_name = "cx.lvl",
+diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
+index a494543d3ae1..eb47fe5ed280 100644
+--- a/drivers/watchdog/qcom-wdt.c
++++ b/drivers/watchdog/qcom-wdt.c
+@@ -246,7 +246,7 @@ static int qcom_wdt_probe(struct platform_device *pdev)
+ }
+
+ /* check if there is pretimeout support */
+- irq = platform_get_irq(pdev, 0);
++ irq = platform_get_irq_optional(pdev, 0);
+ if (irq > 0) {
+ ret = devm_request_irq(dev, irq, qcom_wdt_isr,
+ IRQF_TRIGGER_RISING,
+diff --git a/drivers/watchdog/stm32_iwdg.c b/drivers/watchdog/stm32_iwdg.c
+index a3a329011a06..25188d6bbe15 100644
+--- a/drivers/watchdog/stm32_iwdg.c
++++ b/drivers/watchdog/stm32_iwdg.c
+@@ -262,6 +262,24 @@ static int stm32_iwdg_probe(struct platform_device *pdev)
+ watchdog_set_nowayout(wdd, WATCHDOG_NOWAYOUT);
+ watchdog_init_timeout(wdd, 0, dev);
+
++ /*
++	 * If CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED is set (which means
++	 * U-Boot/bootloaders leave the watchdog running), we should
++	 * decide here how to prevent any side effects before the
++	 * user space daemon takes care of it. The best option, given
++	 * that there is no way to read values back from the hardware,
++	 * is to enforce that the watchdog runs with deterministic
++	 * values.
++ */
++ if (IS_ENABLED(CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED)) {
++ ret = stm32_iwdg_start(wdd);
++ if (ret)
++ return ret;
++
++ /* Make sure the watchdog is serviced */
++ set_bit(WDOG_HW_RUNNING, &wdd->status);
++ }
++
+ ret = devm_watchdog_register_device(dev, wdd);
+ if (ret)
+ return ret;
+diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
+index 295a7a21b774..e7dd07f47825 100644
+--- a/fs/nfs/Kconfig
++++ b/fs/nfs/Kconfig
+@@ -90,7 +90,7 @@ config NFS_V4
+ config NFS_SWAP
+ bool "Provide swap over NFS support"
+ default n
+- depends on NFS_FS
++ depends on NFS_FS && SWAP
+ select SUNRPC_SWAP
+ help
+ This option enables swapon to work on files located on NFS mounts.
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index 040a50fd9bf3..29f00da8a0b7 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -245,10 +245,10 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
+ data->ds_commit_index);
+
+ /* verifier not set so always fail */
+- if (verfp->committed < 0)
++ if (verfp->committed < 0 || data->res.verf->committed <= NFS_UNSTABLE)
+ return 1;
+
+- return nfs_direct_cmp_verf(verfp, &data->verf);
++ return nfs_direct_cmp_verf(verfp, data->res.verf);
+ }
+
+ /**
+diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
+index 927eb680f161..69971f6c840d 100644
+--- a/fs/nfs/nfs3xdr.c
++++ b/fs/nfs/nfs3xdr.c
+@@ -2334,6 +2334,7 @@ static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
+ void *data)
+ {
+ struct nfs_commitres *result = data;
++ struct nfs_writeverf *verf = result->verf;
+ enum nfs_stat status;
+ int error;
+
+@@ -2346,7 +2347,9 @@ static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
+ result->op_status = status;
+ if (status != NFS3_OK)
+ goto out_status;
+- error = decode_writeverf3(xdr, &result->verf->verifier);
++ error = decode_writeverf3(xdr, &verf->verifier);
++ if (!error)
++ verf->committed = NFS_FILE_SYNC;
+ out:
+ return error;
+ out_status:
+diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
+index 1fe83e0f663e..9637aad36bdc 100644
+--- a/fs/nfs/nfs42proc.c
++++ b/fs/nfs/nfs42proc.c
+@@ -61,8 +61,11 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
+
+ status = nfs4_set_rw_stateid(&args.falloc_stateid, lock->open_context,
+ lock, FMODE_WRITE);
+- if (status)
++ if (status) {
++ if (status == -EAGAIN)
++ status = -NFS4ERR_BAD_STATEID;
+ return status;
++ }
+
+ res.falloc_fattr = nfs_alloc_fattr();
+ if (!res.falloc_fattr)
+@@ -287,8 +290,11 @@ static ssize_t _nfs42_proc_copy(struct file *src,
+ } else {
+ status = nfs4_set_rw_stateid(&args->src_stateid,
+ src_lock->open_context, src_lock, FMODE_READ);
+- if (status)
++ if (status) {
++ if (status == -EAGAIN)
++ status = -NFS4ERR_BAD_STATEID;
+ return status;
++ }
+ }
+ status = nfs_filemap_write_and_wait_range(file_inode(src)->i_mapping,
+ pos_src, pos_src + (loff_t)count - 1);
+@@ -297,8 +303,11 @@ static ssize_t _nfs42_proc_copy(struct file *src,
+
+ status = nfs4_set_rw_stateid(&args->dst_stateid, dst_lock->open_context,
+ dst_lock, FMODE_WRITE);
+- if (status)
++ if (status) {
++ if (status == -EAGAIN)
++ status = -NFS4ERR_BAD_STATEID;
+ return status;
++ }
+
+ status = nfs_sync_inode(dst_inode);
+ if (status)
+@@ -546,8 +555,11 @@ static int _nfs42_proc_copy_notify(struct file *src, struct file *dst,
+ status = nfs4_set_rw_stateid(&args->cna_src_stateid, ctx, l_ctx,
+ FMODE_READ);
+ nfs_put_lock_context(l_ctx);
+- if (status)
++ if (status) {
++ if (status == -EAGAIN)
++ status = -NFS4ERR_BAD_STATEID;
+ return status;
++ }
+
+ status = nfs4_call_sync(src_server->client, src_server, &msg,
+ &args->cna_seq_args, &res->cnr_seq_res, 0);
+@@ -618,8 +630,11 @@ static loff_t _nfs42_proc_llseek(struct file *filep,
+
+ status = nfs4_set_rw_stateid(&args.sa_stateid, lock->open_context,
+ lock, FMODE_READ);
+- if (status)
++ if (status) {
++ if (status == -EAGAIN)
++ status = -NFS4ERR_BAD_STATEID;
+ return status;
++ }
+
+ status = nfs_filemap_write_and_wait_range(inode->i_mapping,
+ offset, LLONG_MAX);
+@@ -994,13 +1009,18 @@ static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
+
+ status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
+ src_lock, FMODE_READ);
+- if (status)
++ if (status) {
++ if (status == -EAGAIN)
++ status = -NFS4ERR_BAD_STATEID;
+ return status;
+-
++ }
+ status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
+ dst_lock, FMODE_WRITE);
+- if (status)
++ if (status) {
++ if (status == -EAGAIN)
++ status = -NFS4ERR_BAD_STATEID;
+ return status;
++ }
+
+ res.dst_fattr = nfs_alloc_fattr();
+ if (!res.dst_fattr)
+diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
+index a7a73b1d1fec..a5db055e2a9b 100644
+--- a/fs/nfs/nfs4_fs.h
++++ b/fs/nfs/nfs4_fs.h
+@@ -446,9 +446,7 @@ extern void nfs4_schedule_state_renewal(struct nfs_client *);
+ extern void nfs4_renewd_prepare_shutdown(struct nfs_server *);
+ extern void nfs4_kill_renewd(struct nfs_client *);
+ extern void nfs4_renew_state(struct work_struct *);
+-extern void nfs4_set_lease_period(struct nfs_client *clp,
+- unsigned long lease,
+- unsigned long lastrenewed);
++extern void nfs4_set_lease_period(struct nfs_client *clp, unsigned long lease);
+
+
+ /* nfs4state.c */
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 76d37161409a..a2759b4062ae 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -3187,6 +3187,11 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir,
+ exception.retry = 1;
+ continue;
+ }
++ if (status == -NFS4ERR_EXPIRED) {
++ nfs4_schedule_lease_recovery(server->nfs_client);
++ exception.retry = 1;
++ continue;
++ }
+ if (status == -EAGAIN) {
+ /* We must have found a delegation */
+ exception.retry = 1;
+@@ -3239,6 +3244,8 @@ static int _nfs4_do_setattr(struct inode *inode,
+ nfs_put_lock_context(l_ctx);
+ if (status == -EIO)
+ return -EBADF;
++ else if (status == -EAGAIN)
++ goto zero_stateid;
+ } else {
+ zero_stateid:
+ nfs4_stateid_copy(&arg->stateid, &zero_stateid);
+@@ -5019,16 +5026,13 @@ static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, str
+ struct nfs4_exception exception = {
+ .interruptible = true,
+ };
+- unsigned long now = jiffies;
+ int err;
+
+ do {
+ err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
+ trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
+ if (err == 0) {
+- nfs4_set_lease_period(server->nfs_client,
+- fsinfo->lease_time * HZ,
+- now);
++ nfs4_set_lease_period(server->nfs_client, fsinfo->lease_time * HZ);
+ break;
+ }
+ err = nfs4_handle_exception(server, err, &exception);
+@@ -6084,6 +6088,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
+ .callback_data = &setclientid,
+ .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN,
+ };
++ unsigned long now = jiffies;
+ int status;
+
+ /* nfs_client_id4 */
+@@ -6116,6 +6121,9 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
+ clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
+ put_rpccred(setclientid.sc_cred);
+ }
++
++ if (status == 0)
++ do_renew_lease(clp, now);
+ out:
+ trace_nfs4_setclientid(clp, status);
+ dprintk("NFS reply setclientid: %d\n", status);
+@@ -8203,6 +8211,7 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cre
+ struct rpc_task *task;
+ struct nfs41_exchange_id_args *argp;
+ struct nfs41_exchange_id_res *resp;
++ unsigned long now = jiffies;
+ int status;
+
+ task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL);
+@@ -8223,6 +8232,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cre
+ if (status != 0)
+ goto out;
+
++ do_renew_lease(clp, now);
++
+ clp->cl_clientid = resp->clientid;
+ clp->cl_exchange_flags = resp->flags;
+ clp->cl_seqid = resp->seqid;
+diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
+index 6ea431b067dd..ff876dda7f06 100644
+--- a/fs/nfs/nfs4renewd.c
++++ b/fs/nfs/nfs4renewd.c
+@@ -138,15 +138,12 @@ nfs4_kill_renewd(struct nfs_client *clp)
+ *
+ * @clp: pointer to nfs_client
+ * @lease: new value for lease period
+- * @lastrenewed: time at which lease was last renewed
+ */
+ void nfs4_set_lease_period(struct nfs_client *clp,
+- unsigned long lease,
+- unsigned long lastrenewed)
++ unsigned long lease)
+ {
+ spin_lock(&clp->cl_lock);
+ clp->cl_lease_time = lease;
+- clp->cl_last_renewal = lastrenewed;
+ spin_unlock(&clp->cl_lock);
+
+ /* Cap maximum reconnect timeout at 1/2 lease period */
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 34552329233d..f0b002734355 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -92,17 +92,15 @@ static int nfs4_setup_state_renewal(struct nfs_client *clp)
+ {
+ int status;
+ struct nfs_fsinfo fsinfo;
+- unsigned long now;
+
+ if (!test_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state)) {
+ nfs4_schedule_state_renewal(clp);
+ return 0;
+ }
+
+- now = jiffies;
+ status = nfs4_proc_get_lease_time(clp, &fsinfo);
+ if (status == 0) {
+- nfs4_set_lease_period(clp, fsinfo.lease_time * HZ, now);
++ nfs4_set_lease_period(clp, fsinfo.lease_time * HZ);
+ nfs4_schedule_state_renewal(clp);
+ }
+
+diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
+index e60b6fbd5ada..d405557cb43f 100644
+--- a/fs/nfs/nfs4trace.h
++++ b/fs/nfs/nfs4trace.h
+@@ -352,7 +352,7 @@ DECLARE_EVENT_CLASS(nfs4_clientid_event,
+ ),
+
+ TP_fast_assign(
+- __entry->error = error;
++ __entry->error = error < 0 ? -error : 0;
+ __assign_str(dstaddr, clp->cl_hostname);
+ ),
+
+@@ -432,7 +432,8 @@ TRACE_EVENT(nfs4_sequence_done,
+ __entry->target_highest_slotid =
+ res->sr_target_highest_slotid;
+ __entry->status_flags = res->sr_status_flags;
+- __entry->error = res->sr_status;
++ __entry->error = res->sr_status < 0 ?
++ -res->sr_status : 0;
+ ),
+ TP_printk(
+ "error=%ld (%s) session=0x%08x slot_nr=%u seq_nr=%u "
+@@ -640,7 +641,7 @@ TRACE_EVENT(nfs4_state_mgr_failed,
+ ),
+
+ TP_fast_assign(
+- __entry->error = status;
++ __entry->error = status < 0 ? -status : 0;
+ __entry->state = clp->cl_state;
+ __assign_str(hostname, clp->cl_hostname);
+ __assign_str(section, section);
+@@ -659,7 +660,7 @@ TRACE_EVENT(nfs4_xdr_status,
+ TP_PROTO(
+ const struct xdr_stream *xdr,
+ u32 op,
+- int error
++ u32 error
+ ),
+
+ TP_ARGS(xdr, op, error),
+@@ -849,7 +850,7 @@ TRACE_EVENT(nfs4_close,
+ __entry->fileid = NFS_FILEID(inode);
+ __entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
+ __entry->fmode = (__force unsigned int)state->state;
+- __entry->error = error;
++ __entry->error = error < 0 ? -error : 0;
+ __entry->stateid_seq =
+ be32_to_cpu(args->stateid.seqid);
+ __entry->stateid_hash =
+@@ -914,7 +915,7 @@ DECLARE_EVENT_CLASS(nfs4_lock_event,
+ TP_fast_assign(
+ const struct inode *inode = state->inode;
+
+- __entry->error = error;
++ __entry->error = error < 0 ? -error : 0;
+ __entry->cmd = cmd;
+ __entry->type = request->fl_type;
+ __entry->start = request->fl_start;
+@@ -986,7 +987,7 @@ TRACE_EVENT(nfs4_set_lock,
+ TP_fast_assign(
+ const struct inode *inode = state->inode;
+
+- __entry->error = error;
++ __entry->error = error < 0 ? -error : 0;
+ __entry->cmd = cmd;
+ __entry->type = request->fl_type;
+ __entry->start = request->fl_start;
+@@ -1164,7 +1165,7 @@ TRACE_EVENT(nfs4_delegreturn_exit,
+ TP_fast_assign(
+ __entry->dev = res->server->s_dev;
+ __entry->fhandle = nfs_fhandle_hash(args->fhandle);
+- __entry->error = error;
++ __entry->error = error < 0 ? -error : 0;
+ __entry->stateid_seq =
+ be32_to_cpu(args->stateid->seqid);
+ __entry->stateid_hash =
+@@ -1204,7 +1205,7 @@ DECLARE_EVENT_CLASS(nfs4_test_stateid_event,
+ TP_fast_assign(
+ const struct inode *inode = state->inode;
+
+- __entry->error = error;
++ __entry->error = error < 0 ? -error : 0;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->fileid = NFS_FILEID(inode);
+ __entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
+@@ -1306,7 +1307,7 @@ TRACE_EVENT(nfs4_lookupp,
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = NFS_FILEID(inode);
+- __entry->error = error;
++ __entry->error = error < 0 ? -error : 0;
+ ),
+
+ TP_printk(
+@@ -1342,7 +1343,7 @@ TRACE_EVENT(nfs4_rename,
+ __entry->dev = olddir->i_sb->s_dev;
+ __entry->olddir = NFS_FILEID(olddir);
+ __entry->newdir = NFS_FILEID(newdir);
+- __entry->error = error;
++ __entry->error = error < 0 ? -error : 0;
+ __assign_str(oldname, oldname->name);
+ __assign_str(newname, newname->name);
+ ),
+@@ -1433,7 +1434,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_event,
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->fileid = NFS_FILEID(inode);
+ __entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
+- __entry->error = error;
++ __entry->error = error < 0 ? -error : 0;
+ __entry->stateid_seq =
+ be32_to_cpu(stateid->seqid);
+ __entry->stateid_hash =
+@@ -1489,7 +1490,7 @@ DECLARE_EVENT_CLASS(nfs4_getattr_event,
+ __entry->valid = fattr->valid;
+ __entry->fhandle = nfs_fhandle_hash(fhandle);
+ __entry->fileid = (fattr->valid & NFS_ATTR_FATTR_FILEID) ? fattr->fileid : 0;
+- __entry->error = error;
++ __entry->error = error < 0 ? -error : 0;
+ ),
+
+ TP_printk(
+@@ -1536,7 +1537,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event,
+ ),
+
+ TP_fast_assign(
+- __entry->error = error;
++ __entry->error = error < 0 ? -error : 0;
+ __entry->fhandle = nfs_fhandle_hash(fhandle);
+ if (!IS_ERR_OR_NULL(inode)) {
+ __entry->fileid = NFS_FILEID(inode);
+@@ -1593,7 +1594,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
+ ),
+
+ TP_fast_assign(
+- __entry->error = error;
++ __entry->error = error < 0 ? -error : 0;
+ __entry->fhandle = nfs_fhandle_hash(fhandle);
+ if (!IS_ERR_OR_NULL(inode)) {
+ __entry->fileid = NFS_FILEID(inode);
+@@ -1896,7 +1897,7 @@ TRACE_EVENT(nfs4_layoutget,
+ __entry->iomode = args->iomode;
+ __entry->offset = args->offset;
+ __entry->count = args->length;
+- __entry->error = error;
++ __entry->error = error < 0 ? -error : 0;
+ __entry->stateid_seq =
+ be32_to_cpu(state->stateid.seqid);
+ __entry->stateid_hash =
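Every tracepoint hunk above replaces the raw assignment with `error < 0 ? -error : 0`, so the trace field always carries a non-negative errno value (0 on success) instead of sometimes holding a positive count or a negative errno. A small standalone sketch of that normalization (the helper name is illustrative only):

    #include <errno.h>
    #include <stdio.h>

    /* Store 0 for success or positive returns, the positive errno otherwise. */
    static unsigned int trace_error(long ret)
    {
        return ret < 0 ? (unsigned int)-ret : 0;
    }

    int main(void)
    {
        printf("%u %u %u\n", trace_error(0), trace_error(42), trace_error(-ENOENT));
        /* prints: 0 0 2 */
        return 0;
    }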
+diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
+index 936c57779ff4..d0feef17db50 100644
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -4313,11 +4313,14 @@ static int decode_write_verifier(struct xdr_stream *xdr, struct nfs_write_verifi
+
+ static int decode_commit(struct xdr_stream *xdr, struct nfs_commitres *res)
+ {
++ struct nfs_writeverf *verf = res->verf;
+ int status;
+
+ status = decode_op_hdr(xdr, OP_COMMIT);
+ if (!status)
+- status = decode_write_verifier(xdr, &res->verf->verifier);
++ status = decode_write_verifier(xdr, &verf->verifier);
++ if (!status)
++ verf->committed = NFS_FILE_SYNC;
+ return status;
+ }
+
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index cec3070ab577..542ea8dfd1bc 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -1425,7 +1425,7 @@ retry:
+ /* lo ref dropped in pnfs_roc_release() */
+ layoutreturn = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
+ /* If the creds don't match, we can't compound the layoutreturn */
+- if (!layoutreturn || cred != lo->plh_lc_cred)
++ if (!layoutreturn || cred_fscmp(cred, lo->plh_lc_cred) != 0)
+ goto out_noroc;
+
+ roc = layoutreturn;
+@@ -1998,8 +1998,6 @@ lookup_again:
+ trace_pnfs_update_layout(ino, pos, count,
+ iomode, lo, lseg,
+ PNFS_UPDATE_LAYOUT_INVALID_OPEN);
+- if (status != -EAGAIN)
+- goto out_unlock;
+ spin_unlock(&ino->i_lock);
+ nfs4_schedule_stateid_recovery(server, ctx->state);
+ pnfs_clear_first_layoutget(lo);
+diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
+index 82af4809b869..8b37e7f8e789 100644
+--- a/fs/nfs/pnfs_nfs.c
++++ b/fs/nfs/pnfs_nfs.c
+@@ -31,12 +31,11 @@ EXPORT_SYMBOL_GPL(pnfs_generic_rw_release);
+ /* Fake up some data that will cause nfs_commit_release to retry the writes. */
+ void pnfs_generic_prepare_to_resend_writes(struct nfs_commit_data *data)
+ {
+- struct nfs_page *first = nfs_list_entry(data->pages.next);
++ struct nfs_writeverf *verf = data->res.verf;
+
+ data->task.tk_status = 0;
+- memcpy(&data->verf.verifier, &first->wb_verf,
+- sizeof(data->verf.verifier));
+- data->verf.verifier.data[0]++; /* ensure verifier mismatch */
++ memset(&verf->verifier, 0, sizeof(verf->verifier));
++ verf->committed = NFS_UNSTABLE;
+ }
+ EXPORT_SYMBOL_GPL(pnfs_generic_prepare_to_resend_writes);
+
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 52cab65f91cf..913eb37c249b 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -243,7 +243,15 @@ out:
+ /* A writeback failed: mark the page as bad, and invalidate the page cache */
+ static void nfs_set_pageerror(struct address_space *mapping)
+ {
++ struct inode *inode = mapping->host;
++
+ nfs_zap_mapping(mapping->host, mapping);
++ /* Force file size revalidation */
++ spin_lock(&inode->i_lock);
++ NFS_I(inode)->cache_validity |= NFS_INO_REVAL_FORCED |
++ NFS_INO_REVAL_PAGECACHE |
++ NFS_INO_INVALID_SIZE;
++ spin_unlock(&inode->i_lock);
+ }
+
+ static void nfs_mapping_set_error(struct page *page, int error)
+@@ -1829,6 +1837,7 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata)
+
+ static void nfs_commit_release_pages(struct nfs_commit_data *data)
+ {
++ const struct nfs_writeverf *verf = data->res.verf;
+ struct nfs_page *req;
+ int status = data->task.tk_status;
+ struct nfs_commit_info cinfo;
+@@ -1856,7 +1865,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
+
+ /* Okay, COMMIT succeeded, apparently. Check the verifier
+ * returned by the server against all stored verfs. */
+- if (!nfs_write_verifier_cmp(&req->wb_verf, &data->verf.verifier)) {
++ if (verf->committed > NFS_UNSTABLE &&
++ !nfs_write_verifier_cmp(&req->wb_verf, &verf->verifier)) {
+ /* We have a match */
+ if (req->wb_page)
+ nfs_inode_remove_request(req);
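Together with the pnfs_generic_prepare_to_resend_writes() hunk, the commit path now records whether the server reported stable storage and only trusts the verifier comparison when `committed` is above NFS_UNSTABLE; a zeroed verifier plus NFS_UNSTABLE therefore forces a resend rather than relying on an artificial verifier mismatch. A hedged standalone sketch of that check, using simplified stand-in types rather than the kernel structures:

    #include <stdio.h>
    #include <string.h>

    enum stability { UNSTABLE = 0, DATA_SYNC = 1, FILE_SYNC = 2 };

    struct writeverf {
        enum stability committed;
        unsigned char data[8];
    };

    /* A request is only "done" if the commit was stable and the verifiers match. */
    static int commit_matches(const struct writeverf *req, const struct writeverf *res)
    {
        return res->committed > UNSTABLE &&
               memcmp(req->data, res->data, sizeof(req->data)) == 0;
    }

    int main(void)
    {
        struct writeverf req      = { FILE_SYNC, "abcdefg" };
        struct writeverf unstable = { UNSTABLE,  "abcdefg" };  /* forces a resend */
        struct writeverf stable   = { FILE_SYNC, "abcdefg" };

        printf("%d %d\n", commit_matches(&req, &unstable), commit_matches(&req, &stable));
        return 0;
    }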
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index 27200dea0297..a24937fc56b9 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -461,6 +461,11 @@ struct mlx5_vf_context {
+ int enabled;
+ u64 port_guid;
+ u64 node_guid;
++ /* Valid bits are used to validate administrative guid only.
++ * Enabled after ndo_set_vf_guid
++ */
++ u8 port_guid_valid:1;
++ u8 node_guid_valid:1;
+ enum port_state_policy policy;
+ };
+
+diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
+index 5608e14e3aad..8d0f447e1faa 100644
+--- a/include/rdma/ib_verbs.h
++++ b/include/rdma/ib_verbs.h
+@@ -4300,6 +4300,9 @@ static inline int ib_check_mr_access(int flags)
+ !(flags & IB_ACCESS_LOCAL_WRITE))
+ return -EINVAL;
+
++ if (flags & ~IB_ACCESS_SUPPORTED)
++ return -EINVAL;
++
+ return 0;
+ }
+
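The ib_check_mr_access() hunk adds a whole-mask test so that any flag bit outside the supported set is rejected up front. The idiom is generic; a minimal sketch follows (the mask value is made up for illustration, not the IB_ACCESS_SUPPORTED definition):

    #include <errno.h>
    #include <stdio.h>

    #define ACCESS_SUPPORTED 0x3f          /* illustrative mask of valid flag bits */

    static int check_access(int flags)
    {
        if (flags & ~ACCESS_SUPPORTED)     /* any unknown bit set? */
            return -EINVAL;
        return 0;
    }

    int main(void)
    {
        printf("%d %d\n", check_access(0x03), check_access(0x100));
        return 0;
    }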
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 90e4b00ace89..bfe756dee129 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -7100,6 +7100,12 @@ static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
+
+ if (parent)
+ sched_online_group(tg, parent);
++
++#ifdef CONFIG_UCLAMP_TASK_GROUP
++ /* Propagate the effective uclamp value for the new group */
++ cpu_util_update_eff(css);
++#endif
++
+ return 0;
+ }
+
+diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
+index 458be6b3eda9..3ab23f698221 100644
+--- a/net/core/bpf_sk_storage.c
++++ b/net/core/bpf_sk_storage.c
+@@ -643,9 +643,10 @@ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
+ return ERR_PTR(-ENOMEM);
+ bpf_map_init_from_attr(&smap->map, attr);
+
++ nbuckets = roundup_pow_of_two(num_possible_cpus());
+ /* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
+- smap->bucket_log = max_t(u32, 1, ilog2(roundup_pow_of_two(num_possible_cpus())));
+- nbuckets = 1U << smap->bucket_log;
++ nbuckets = max_t(u32, 2, nbuckets);
++ smap->bucket_log = ilog2(nbuckets);
+ cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap);
+
+ ret = bpf_map_charge_init(&smap->map.memory, cost);
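The allocation hunk computes the bucket count first, clamps it to at least 2 (select_bucket() is undefined with a single bucket, per the comment), and only then derives bucket_log, so the logarithm and the count can no longer disagree. A standalone sketch of that calculation, with the helpers reimplemented here purely for illustration:

    #include <stdio.h>

    static unsigned int roundup_pow_of_two(unsigned int n)
    {
        unsigned int r = 1;

        while (r < n)
            r <<= 1;
        return r;
    }

    static unsigned int ilog2(unsigned int n)
    {
        unsigned int log = 0;

        while (n >>= 1)
            log++;
        return log;
    }

    int main(void)
    {
        unsigned int ncpus = 3;                            /* e.g. num_possible_cpus() */
        unsigned int nbuckets = roundup_pow_of_two(ncpus);

        if (nbuckets < 2)
            nbuckets = 2;                                  /* at least two buckets */
        printf("nbuckets=%u bucket_log=%u\n", nbuckets, ilog2(nbuckets));
        return 0;
    }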
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index 8998e356f423..085cef5857bb 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -234,7 +234,6 @@ static void sock_map_free(struct bpf_map *map)
+ int i;
+
+ synchronize_rcu();
+- rcu_read_lock();
+ raw_spin_lock_bh(&stab->lock);
+ for (i = 0; i < stab->map.max_entries; i++) {
+ struct sock **psk = &stab->sks[i];
+@@ -243,13 +242,15 @@ static void sock_map_free(struct bpf_map *map)
+ sk = xchg(psk, NULL);
+ if (sk) {
+ lock_sock(sk);
++ rcu_read_lock();
+ sock_map_unref(sk, psk);
++ rcu_read_unlock();
+ release_sock(sk);
+ }
+ }
+ raw_spin_unlock_bh(&stab->lock);
+- rcu_read_unlock();
+
++ /* wait for psock readers accessing its map link */
+ synchronize_rcu();
+
+ bpf_map_area_free(stab->sks);
+@@ -416,14 +417,16 @@ static int sock_map_update_elem(struct bpf_map *map, void *key,
+ ret = -EINVAL;
+ goto out;
+ }
+- if (!sock_map_sk_is_suitable(sk) ||
+- sk->sk_state != TCP_ESTABLISHED) {
++ if (!sock_map_sk_is_suitable(sk)) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ sock_map_sk_acquire(sk);
+- ret = sock_map_update_common(map, idx, sk, flags);
++ if (sk->sk_state != TCP_ESTABLISHED)
++ ret = -EOPNOTSUPP;
++ else
++ ret = sock_map_update_common(map, idx, sk, flags);
+ sock_map_sk_release(sk);
+ out:
+ fput(sock->file);
+@@ -739,14 +742,16 @@ static int sock_hash_update_elem(struct bpf_map *map, void *key,
+ ret = -EINVAL;
+ goto out;
+ }
+- if (!sock_map_sk_is_suitable(sk) ||
+- sk->sk_state != TCP_ESTABLISHED) {
++ if (!sock_map_sk_is_suitable(sk)) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ sock_map_sk_acquire(sk);
+- ret = sock_hash_update_common(map, key, sk, flags);
++ if (sk->sk_state != TCP_ESTABLISHED)
++ ret = -EOPNOTSUPP;
++ else
++ ret = sock_hash_update_common(map, key, sk, flags);
+ sock_map_sk_release(sk);
+ out:
+ fput(sock->file);
+@@ -859,19 +864,22 @@ static void sock_hash_free(struct bpf_map *map)
+ int i;
+
+ synchronize_rcu();
+- rcu_read_lock();
+ for (i = 0; i < htab->buckets_num; i++) {
+ bucket = sock_hash_select_bucket(htab, i);
+ raw_spin_lock_bh(&bucket->lock);
+ hlist_for_each_entry_safe(elem, node, &bucket->head, node) {
+ hlist_del_rcu(&elem->node);
+ lock_sock(elem->sk);
++ rcu_read_lock();
+ sock_map_unref(elem->sk, elem);
++ rcu_read_unlock();
+ release_sock(elem->sk);
+ }
+ raw_spin_unlock_bh(&bucket->lock);
+ }
+- rcu_read_unlock();
++
++ /* wait for psock readers accessing its map link */
++ synchronize_rcu();
+
+ bpf_map_area_free(htab->buckets);
+ kfree(htab);
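Both update paths above stop testing `sk->sk_state` before the socket lock is taken; the test is repeated under the lock so the state cannot change between the check and the map insertion. A generic sketch of moving a state check inside the lock (pthread-based, purely illustrative, not the sockmap code):

    #include <pthread.h>
    #include <stdio.h>

    enum state { CLOSED, ESTABLISHED };

    struct sock {
        pthread_mutex_t lock;
        enum state state;
    };

    static int map_update(struct sock *sk)
    {
        int ret;

        pthread_mutex_lock(&sk->lock);
        if (sk->state != ESTABLISHED)   /* re-checked under the lock, not before it */
            ret = -1;
        else
            ret = 0;                    /* safe to link the socket into the map */
        pthread_mutex_unlock(&sk->lock);
        return ret;
    }

    int main(void)
    {
        struct sock sk = { PTHREAD_MUTEX_INITIALIZER, ESTABLISHED };

        printf("%d\n", map_update(&sk));
        return 0;
    }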
+diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
+index e33a73cb1f42..86eefb613b08 100644
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -348,9 +348,6 @@ static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
+ {
+ struct nf_flowtable *flow_table = data;
+
+- if (flow->flags & FLOW_OFFLOAD_HW)
+- nf_flow_offload_stats(flow_table, flow);
+-
+ if (nf_flow_has_expired(flow) || nf_ct_is_dying(flow->ct) ||
+ (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN))) {
+ if (flow->flags & FLOW_OFFLOAD_HW) {
+@@ -361,6 +358,8 @@ static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
+ } else {
+ flow_offload_del(flow_table, flow);
+ }
++ } else if (flow->flags & FLOW_OFFLOAD_HW) {
++ nf_flow_offload_stats(flow_table, flow);
+ }
+ }
+
+@@ -530,9 +529,9 @@ static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
+ static void nf_flow_table_iterate_cleanup(struct nf_flowtable *flowtable,
+ struct net_device *dev)
+ {
+- nf_flow_table_offload_flush(flowtable);
+ nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
+ flush_delayed_work(&flowtable->gc_work);
++ nf_flow_table_offload_flush(flowtable);
+ }
+
+ void nf_flow_table_cleanup(struct net_device *dev)
+@@ -554,6 +553,7 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
+ cancel_delayed_work_sync(&flow_table->gc_work);
+ nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
+ nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
++ nf_flow_table_offload_flush(flow_table);
+ rhashtable_destroy(&flow_table->rhashtable);
+ }
+ EXPORT_SYMBOL_GPL(nf_flow_table_free);
+diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
+index d06969af1085..b879e673953f 100644
+--- a/net/netfilter/nf_flow_table_offload.c
++++ b/net/netfilter/nf_flow_table_offload.c
+@@ -24,6 +24,7 @@ struct flow_offload_work {
+ };
+
+ struct nf_flow_key {
++ struct flow_dissector_key_meta meta;
+ struct flow_dissector_key_control control;
+ struct flow_dissector_key_basic basic;
+ union {
+@@ -55,6 +56,7 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
+ struct nf_flow_key *mask = &match->mask;
+ struct nf_flow_key *key = &match->key;
+
++ NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_META, meta);
+ NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_CONTROL, control);
+ NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_BASIC, basic);
+ NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
+@@ -62,6 +64,9 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
+ NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_TCP, tcp);
+ NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_PORTS, tp);
+
++ key->meta.ingress_ifindex = tuple->iifidx;
++ mask->meta.ingress_ifindex = 0xffffffff;
++
+ switch (tuple->l3proto) {
+ case AF_INET:
+ key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+@@ -105,7 +110,8 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
+ key->tp.dst = tuple->dst_port;
+ mask->tp.dst = 0xffff;
+
+- match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL) |
++ match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_META) |
++ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS);
+ return 0;
+@@ -784,8 +790,7 @@ void nf_flow_offload_stats(struct nf_flowtable *flowtable,
+ __s32 delta;
+
+ delta = nf_flow_timeout_delta(flow->timeout);
+- if ((delta >= (9 * NF_FLOW_TIMEOUT) / 10) ||
+- flow->flags & FLOW_OFFLOAD_HW_DYING)
++ if ((delta >= (9 * NF_FLOW_TIMEOUT) / 10))
+ return;
+
+ offload = kzalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
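The match-building hunks add a meta key so the offloaded rule only fires for packets arriving on the flow's ingress interface; the all-ones mask makes the ifindex an exact-match field alongside the existing address and port keys. A small sketch of key/mask matching, simplified and not using the flow dissector structures:

    #include <stdio.h>
    #include <stdint.h>

    struct key { uint32_t ifindex; uint16_t dport; };

    /* A packet matches when every masked bit of the key agrees with the packet. */
    static int match(const struct key *pkt, const struct key *key, const struct key *mask)
    {
        return (pkt->ifindex & mask->ifindex) == (key->ifindex & mask->ifindex) &&
               (pkt->dport & mask->dport) == (key->dport & mask->dport);
    }

    int main(void)
    {
        struct key key  = { .ifindex = 3, .dport = 443 };
        struct key mask = { .ifindex = 0xffffffff, .dport = 0xffff };  /* exact match */
        struct key pkt1 = { .ifindex = 3, .dport = 443 };
        struct key pkt2 = { .ifindex = 7, .dport = 443 };              /* wrong interface */

        printf("%d %d\n", match(&pkt1, &key, &mask), match(&pkt2, &key, &mask));
        return 0;
    }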
+diff --git a/security/selinux/avc.c b/security/selinux/avc.c
+index ecd3829996aa..23dc888ae305 100644
+--- a/security/selinux/avc.c
++++ b/security/selinux/avc.c
+@@ -424,7 +424,7 @@ static inline int avc_xperms_audit(struct selinux_state *state,
+ if (likely(!audited))
+ return 0;
+ return slow_avc_audit(state, ssid, tsid, tclass, requested,
+- audited, denied, result, ad, 0);
++ audited, denied, result, ad);
+ }
+
+ static void avc_node_free(struct rcu_head *rhead)
+@@ -758,8 +758,7 @@ static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
+ noinline int slow_avc_audit(struct selinux_state *state,
+ u32 ssid, u32 tsid, u16 tclass,
+ u32 requested, u32 audited, u32 denied, int result,
+- struct common_audit_data *a,
+- unsigned int flags)
++ struct common_audit_data *a)
+ {
+ struct common_audit_data stack_data;
+ struct selinux_audit_data sad;
+@@ -772,17 +771,6 @@ noinline int slow_avc_audit(struct selinux_state *state,
+ a->type = LSM_AUDIT_DATA_NONE;
+ }
+
+- /*
+- * When in a RCU walk do the audit on the RCU retry. This is because
+- * the collection of the dname in an inode audit message is not RCU
+- * safe. Note this may drop some audits when the situation changes
+- * during retry. However this is logically just as if the operation
+- * happened a little later.
+- */
+- if ((a->type == LSM_AUDIT_DATA_INODE) &&
+- (flags & MAY_NOT_BLOCK))
+- return -ECHILD;
+-
+ sad.tclass = tclass;
+ sad.requested = requested;
+ sad.ssid = ssid;
+@@ -855,15 +843,14 @@ static int avc_update_node(struct selinux_avc *avc,
+ /*
+ * If we are in a non-blocking code path, e.g. VFS RCU walk,
+ * then we must not add permissions to a cache entry
+- * because we cannot safely audit the denial. Otherwise,
++ * because we will not audit the denial. Otherwise,
+ * during the subsequent blocking retry (e.g. VFS ref walk), we
+ * will find the permissions already granted in the cache entry
+ * and won't audit anything at all, leading to silent denials in
+ * permissive mode that only appear when in enforcing mode.
+ *
+- * See the corresponding handling in slow_avc_audit(), and the
+- * logic in selinux_inode_permission for the MAY_NOT_BLOCK flag,
+- * which is transliterated into AVC_NONBLOCKING.
++ * See the corresponding handling of MAY_NOT_BLOCK in avc_audit()
++ * and selinux_inode_permission().
+ */
+ if (flags & AVC_NONBLOCKING)
+ return 0;
+@@ -1205,6 +1192,25 @@ int avc_has_perm(struct selinux_state *state, u32 ssid, u32 tsid, u16 tclass,
+ return rc;
+ }
+
++int avc_has_perm_flags(struct selinux_state *state,
++ u32 ssid, u32 tsid, u16 tclass, u32 requested,
++ struct common_audit_data *auditdata,
++ int flags)
++{
++ struct av_decision avd;
++ int rc, rc2;
++
++ rc = avc_has_perm_noaudit(state, ssid, tsid, tclass, requested,
++ (flags & MAY_NOT_BLOCK) ? AVC_NONBLOCKING : 0,
++ &avd);
++
++ rc2 = avc_audit(state, ssid, tsid, tclass, requested, &avd, rc,
++ auditdata, flags);
++ if (rc2)
++ return rc2;
++ return rc;
++}
++
+ u32 avc_policy_seqno(struct selinux_state *state)
+ {
+ return state->avc->avc_cache.latest_notif;
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 116b4d644f68..db44c7eb4321 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -2762,6 +2762,14 @@ static int selinux_mount(const char *dev_name,
+ return path_has_perm(cred, path, FILE__MOUNTON);
+ }
+
++static int selinux_move_mount(const struct path *from_path,
++ const struct path *to_path)
++{
++ const struct cred *cred = current_cred();
++
++ return path_has_perm(cred, to_path, FILE__MOUNTON);
++}
++
+ static int selinux_umount(struct vfsmount *mnt, int flags)
+ {
+ const struct cred *cred = current_cred();
+@@ -3004,14 +3012,14 @@ static int selinux_inode_follow_link(struct dentry *dentry, struct inode *inode,
+ if (IS_ERR(isec))
+ return PTR_ERR(isec);
+
+- return avc_has_perm(&selinux_state,
+- sid, isec->sid, isec->sclass, FILE__READ, &ad);
++ return avc_has_perm_flags(&selinux_state,
++ sid, isec->sid, isec->sclass, FILE__READ, &ad,
++ rcu ? MAY_NOT_BLOCK : 0);
+ }
+
+ static noinline int audit_inode_permission(struct inode *inode,
+ u32 perms, u32 audited, u32 denied,
+- int result,
+- unsigned flags)
++ int result)
+ {
+ struct common_audit_data ad;
+ struct inode_security_struct *isec = selinux_inode(inode);
+@@ -3022,7 +3030,7 @@ static noinline int audit_inode_permission(struct inode *inode,
+
+ rc = slow_avc_audit(&selinux_state,
+ current_sid(), isec->sid, isec->sclass, perms,
+- audited, denied, result, &ad, flags);
++ audited, denied, result, &ad);
+ if (rc)
+ return rc;
+ return 0;
+@@ -3069,7 +3077,11 @@ static int selinux_inode_permission(struct inode *inode, int mask)
+ if (likely(!audited))
+ return rc;
+
+- rc2 = audit_inode_permission(inode, perms, audited, denied, rc, flags);
++ /* fall back to ref-walk if we have to generate audit */
++ if (flags & MAY_NOT_BLOCK)
++ return -ECHILD;
++
++ rc2 = audit_inode_permission(inode, perms, audited, denied, rc);
+ if (rc2)
+ return rc2;
+ return rc;
+@@ -6903,6 +6915,8 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
+ LSM_HOOK_INIT(sb_clone_mnt_opts, selinux_sb_clone_mnt_opts),
+ LSM_HOOK_INIT(sb_add_mnt_opt, selinux_add_mnt_opt),
+
++ LSM_HOOK_INIT(move_mount, selinux_move_mount),
++
+ LSM_HOOK_INIT(dentry_init_security, selinux_dentry_init_security),
+ LSM_HOOK_INIT(dentry_create_files_as, selinux_dentry_create_files_as),
+
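The inode-permission path now refuses to build the audit record while in RCU walk: if MAY_NOT_BLOCK is set and an audit would be required, it returns -ECHILD so the VFS retries with ref-walk, where sleeping (and dname collection) is allowed. A schematic sketch of that retry contract, with stand-in constants rather than the SELinux API:

    #include <stdio.h>

    #define RETRY_REF_WALK 10   /* stand-in for -ECHILD: "retry in a blocking context" */

    /* may_not_block models MAY_NOT_BLOCK (RCU walk), where auditing is unsafe. */
    static int permission_check(int denied, int audit_needed, int may_not_block)
    {
        if (audit_needed) {
            if (may_not_block)
                return -RETRY_REF_WALK;                /* VFS retries with ref-walk */
            printf("audit: denied=%d\n", denied);      /* blocking context: safe to log */
        }
        return denied ? -1 : 0;
    }

    int main(void)
    {
        printf("%d %d\n", permission_check(1, 1, 1), permission_check(1, 1, 0));
        return 0;
    }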
+diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
+index 7be0e1e90e8b..cf4cc3ef959b 100644
+--- a/security/selinux/include/avc.h
++++ b/security/selinux/include/avc.h
+@@ -100,8 +100,7 @@ static inline u32 avc_audit_required(u32 requested,
+ int slow_avc_audit(struct selinux_state *state,
+ u32 ssid, u32 tsid, u16 tclass,
+ u32 requested, u32 audited, u32 denied, int result,
+- struct common_audit_data *a,
+- unsigned flags);
++ struct common_audit_data *a);
+
+ /**
+ * avc_audit - Audit the granting or denial of permissions.
+@@ -135,9 +134,12 @@ static inline int avc_audit(struct selinux_state *state,
+ audited = avc_audit_required(requested, avd, result, 0, &denied);
+ if (likely(!audited))
+ return 0;
++ /* fall back to ref-walk if we have to generate audit */
++ if (flags & MAY_NOT_BLOCK)
++ return -ECHILD;
+ return slow_avc_audit(state, ssid, tsid, tclass,
+ requested, audited, denied, result,
+- a, flags);
++ a);
+ }
+
+ #define AVC_STRICT 1 /* Ignore permissive mode. */
+@@ -153,6 +155,11 @@ int avc_has_perm(struct selinux_state *state,
+ u32 ssid, u32 tsid,
+ u16 tclass, u32 requested,
+ struct common_audit_data *auditdata);
++int avc_has_perm_flags(struct selinux_state *state,
++ u32 ssid, u32 tsid,
++ u16 tclass, u32 requested,
++ struct common_audit_data *auditdata,
++ int flags);
+
+ int avc_has_extended_perms(struct selinux_state *state,
+ u32 ssid, u32 tsid, u16 tclass, u32 requested,
+diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
+index a428ff393ea2..2b5f3b1b062b 100644
+--- a/sound/soc/soc-generic-dmaengine-pcm.c
++++ b/sound/soc/soc-generic-dmaengine-pcm.c
+@@ -117,7 +117,6 @@ dmaengine_pcm_set_runtime_hwparams(struct snd_soc_component *component,
+ struct dma_chan *chan = pcm->chan[substream->stream];
+ struct snd_dmaengine_dai_dma_data *dma_data;
+ struct snd_pcm_hardware hw;
+- int ret;
+
+ if (pcm->config && pcm->config->pcm_hardware)
+ return snd_soc_set_runtime_hwparams(substream,
+@@ -138,12 +137,15 @@ dmaengine_pcm_set_runtime_hwparams(struct snd_soc_component *component,
+ if (pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE)
+ hw.info |= SNDRV_PCM_INFO_BATCH;
+
+- ret = snd_dmaengine_pcm_refine_runtime_hwparams(substream,
+- dma_data,
+- &hw,
+- chan);
+- if (ret)
+- return ret;
++	/*
++	 * FIXME: The return value is deliberately ignored here, matching the
++	 * behaviour before snd_dmaengine_pcm_refine_runtime_hwparams() was
++	 * introduced.
++	 */
++ snd_dmaengine_pcm_refine_runtime_hwparams(substream,
++ dma_data,
++ &hw,
++ chan);
+
+ return snd_soc_set_runtime_hwparams(substream, &hw);
+ }
+diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
+index 2ce9c5ba1934..9288be1d6bf0 100644
+--- a/tools/bpf/bpftool/prog.c
++++ b/tools/bpf/bpftool/prog.c
+@@ -500,7 +500,7 @@ static int do_dump(int argc, char **argv)
+ buf = (unsigned char *)(info->jited_prog_insns);
+ member_len = info->jited_prog_len;
+ } else { /* DUMP_XLATED */
+- if (info->xlated_prog_len == 0) {
++ if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) {
+ p_err("error retrieving insn dump: kernel.kptr_restrict set?");
+ goto err_free;
+ }
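The do_dump() fix treats a NULL translated-instruction buffer the same as a zero length, since the kernel may withhold the buffer (the existing error message already points at kernel.kptr_restrict). A generic sketch of guarding both conditions before dereferencing, not using bpftool's structures:

    #include <stdio.h>

    static int dump(const unsigned char *insns, unsigned int len)
    {
        if (len == 0 || insns == NULL) {   /* either condition means "nothing to dump" */
            fprintf(stderr, "error retrieving insn dump\n");
            return -1;
        }
        for (unsigned int i = 0; i < len; i++)
            printf("%02x", insns[i]);
        putchar('\n');
        return 0;
    }

    int main(void)
    {
        unsigned char buf[] = { 0xb7, 0x00, 0x00, 0x00 };

        dump(NULL, 4);                     /* rejected: pointer withheld */
        return dump(buf, sizeof(buf));
    }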
+diff --git a/tools/power/acpi/Makefile.config b/tools/power/acpi/Makefile.config
+index 0111d246d1ca..54a2857c2510 100644
+--- a/tools/power/acpi/Makefile.config
++++ b/tools/power/acpi/Makefile.config
+@@ -15,7 +15,7 @@ include $(srctree)/../../scripts/Makefile.include
+
+ OUTPUT=$(srctree)/
+ ifeq ("$(origin O)", "command line")
+- OUTPUT := $(O)/power/acpi/
++ OUTPUT := $(O)/tools/power/acpi/
+ endif
+ #$(info Determined 'OUTPUT' to be $(OUTPUT))
+
+diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
+new file mode 100644
+index 000000000000..07f5b462c2ef
+--- /dev/null
++++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
+@@ -0,0 +1,74 @@
++// SPDX-License-Identifier: GPL-2.0
++// Copyright (c) 2020 Cloudflare
++
++#include "test_progs.h"
++
++static int connected_socket_v4(void)
++{
++ struct sockaddr_in addr = {
++ .sin_family = AF_INET,
++ .sin_port = htons(80),
++ .sin_addr = { inet_addr("127.0.0.1") },
++ };
++ socklen_t len = sizeof(addr);
++ int s, repair, err;
++
++ s = socket(AF_INET, SOCK_STREAM, 0);
++ if (CHECK_FAIL(s == -1))
++ goto error;
++
++ repair = TCP_REPAIR_ON;
++ err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
++ if (CHECK_FAIL(err))
++ goto error;
++
++ err = connect(s, (struct sockaddr *)&addr, len);
++ if (CHECK_FAIL(err))
++ goto error;
++
++ repair = TCP_REPAIR_OFF_NO_WP;
++ err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
++ if (CHECK_FAIL(err))
++ goto error;
++
++ return s;
++error:
++ perror(__func__);
++ close(s);
++ return -1;
++}
++
++/* Create a map, populate it with one socket, and free the map. */
++static void test_sockmap_create_update_free(enum bpf_map_type map_type)
++{
++ const int zero = 0;
++ int s, map, err;
++
++ s = connected_socket_v4();
++ if (CHECK_FAIL(s == -1))
++ return;
++
++ map = bpf_create_map(map_type, sizeof(int), sizeof(int), 1, 0);
++ if (CHECK_FAIL(map == -1)) {
++ perror("bpf_create_map");
++ goto out;
++ }
++
++ err = bpf_map_update_elem(map, &zero, &s, BPF_NOEXIST);
++ if (CHECK_FAIL(err)) {
++ perror("bpf_map_update");
++ goto out;
++ }
++
++out:
++ close(map);
++ close(s);
++}
++
++void test_sockmap_basic(void)
++{
++ if (test__start_subtest("sockmap create_update_free"))
++ test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKMAP);
++ if (test__start_subtest("sockhash create_update_free"))
++ test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKHASH);
++}
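The new selftest builds its map entry from a socket forced into the established state with TCP_REPAIR: turning repair mode on lets connect() to 127.0.0.1:80 succeed without any listener, and TCP_REPAIR_OFF_NO_WP then leaves the socket looking established. That keeps the create/update/free cycle self-contained while still exercising the sk_state check that the sock_map update paths above now perform under the socket lock.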
+diff --git a/virt/kvm/arm/aarch32.c b/virt/kvm/arm/aarch32.c
+index 631d397ac81b..0a356aa91aa1 100644
+--- a/virt/kvm/arm/aarch32.c
++++ b/virt/kvm/arm/aarch32.c
+@@ -15,6 +15,10 @@
+ #include <asm/kvm_emulate.h>
+ #include <asm/kvm_hyp.h>
+
++#define DFSR_FSC_EXTABT_LPAE 0x10
++#define DFSR_FSC_EXTABT_nLPAE 0x08
++#define DFSR_LPAE BIT(9)
++
+ /*
+ * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
+ */
+@@ -181,10 +185,12 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
+
+ /* Give the guest an IMPLEMENTATION DEFINED exception */
+ is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
+- if (is_lpae)
+- *fsr = 1 << 9 | 0x34;
+- else
+- *fsr = 0x14;
++ if (is_lpae) {
++ *fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
++ } else {
++		/* no need to shuffle FS[4] into DFSR[10] as it's 0 */
++ *fsr = DFSR_FSC_EXTABT_nLPAE;
++ }
+ }
+
+ void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
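Replacing the open-coded literals with named constants makes the fault status explicit: a synchronous external abort encoded for both the LPAE and non-LPAE DFSR formats, with the long-descriptor bit set in the LPAE case. A tiny sketch printing the composed values, with the constants copied from the hunk above:

    #include <stdio.h>
    #include <stdint.h>

    #define DFSR_FSC_EXTABT_LPAE   0x10
    #define DFSR_FSC_EXTABT_nLPAE  0x08
    #define DFSR_LPAE              (1u << 9)

    int main(void)
    {
        uint32_t lpae  = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
        uint32_t nlpae = DFSR_FSC_EXTABT_nLPAE;

        printf("LPAE DFSR=0x%x  non-LPAE DFSR=0x%x\n", lpae, nlpae);  /* 0x210 and 0x8 */
        return 0;
    }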
+diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
+index f182b2380345..c6c2a9dde00c 100644
+--- a/virt/kvm/arm/arch_timer.c
++++ b/virt/kvm/arm/arch_timer.c
+@@ -805,6 +805,7 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
+ switch (treg) {
+ case TIMER_REG_TVAL:
+ val = timer->cnt_cval - kvm_phys_timer_read() + timer->cntvoff;
++ val &= lower_32_bits(val);
+ break;
+
+ case TIMER_REG_CTL:
+@@ -850,7 +851,7 @@ static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
+ {
+ switch (treg) {
+ case TIMER_REG_TVAL:
+- timer->cnt_cval = kvm_phys_timer_read() - timer->cntvoff + val;
++ timer->cnt_cval = kvm_phys_timer_read() - timer->cntvoff + (s32)val;
+ break;
+
+ case TIMER_REG_CTL:
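TVAL is architecturally a signed 32-bit register, so the write path casts the guest value to s32 before adding it to the 64-bit counter (sign-extending negative deadlines) and the read path keeps only the low 32 bits of the result. A standalone sketch of why the cast matters, with illustrative values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t now = 1000;            /* stand-in for kvm_phys_timer_read() */
        uint32_t tval = 0xfffffff6;     /* guest writes TVAL = -10 */

        uint64_t wrong = now + tval;             /* zero-extended: now + 4294967286 */
        uint64_t right = now + (int32_t)tval;    /* sign-extended: now - 10 */

        printf("wrong cval=%llu right cval=%llu\n",
               (unsigned long long)wrong, (unsigned long long)right);

        /* reading TVAL back: keep only the low 32 bits of (cval - now) */
        printf("tval readback=0x%x\n", (uint32_t)(right - now));
        return 0;
    }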
+diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
+index 0b32a904a1bb..a2777efb558e 100644
+--- a/virt/kvm/arm/mmu.c
++++ b/virt/kvm/arm/mmu.c
+@@ -2147,7 +2147,8 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+ if (!kvm->arch.pgd)
+ return 0;
+ trace_kvm_test_age_hva(hva);
+- return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
++ return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE,
++ kvm_test_age_hva_handler, NULL);
+ }
+
+ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
+diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
+index 8731dfeced8b..4c08fd009768 100644
+--- a/virt/kvm/arm/pmu.c
++++ b/virt/kvm/arm/pmu.c
+@@ -480,25 +480,45 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
+ */
+ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
+ {
++ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+ int i;
+- u64 type, enable, reg;
+
+- if (val == 0)
++ if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
+ return;
+
+- enable = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
++ /* Weed out disabled counters */
++ val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
++
+ for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
++ u64 type, reg;
++
+ if (!(val & BIT(i)))
+ continue;
+- type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
+- & ARMV8_PMU_EVTYPE_EVENT;
+- if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
+- && (enable & BIT(i))) {
+- reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
++
++ /* PMSWINC only applies to ... SW_INC! */
++ type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
++ type &= ARMV8_PMU_EVTYPE_EVENT;
++ if (type != ARMV8_PMUV3_PERFCTR_SW_INCR)
++ continue;
++
++		/* increment this SW_INC event counter */
++ reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
++ reg = lower_32_bits(reg);
++ __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
++
++ if (reg) /* no overflow on the low part */
++ continue;
++
++ if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) {
++ /* increment the high counter */
++ reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1;
+ reg = lower_32_bits(reg);
+- __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
+- if (!reg)
+- __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
++ __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg;
++ if (!reg) /* mark overflow on the high counter */
++ __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1);
++ } else {
++ /* mark overflow on low counter */
++ __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
+ }
+ }
+ }
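The rewritten loop treats each enabled SW_INCR target as a 32-bit counter: the low word is incremented and wrapped explicitly, and only when it wraps does the code either carry into the high word of a chained pair or set the overflow bit of an unchained counter. A compact userspace model of that carry/overflow logic, not the KVM data structures:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    struct counter {
        uint32_t lo, hi;
        bool chained;
        bool overflow_lo, overflow_hi;
    };

    static void sw_increment(struct counter *c)
    {
        c->lo++;                        /* 32-bit wrap is implicit in uint32_t */
        if (c->lo)
            return;                     /* no carry out of the low word */

        if (c->chained) {
            c->hi++;                    /* propagate the carry into the high word */
            if (!c->hi)
                c->overflow_hi = true;
        } else {
            c->overflow_lo = true;      /* unchained: report overflow on the low counter */
        }
    }

    int main(void)
    {
        struct counter c = { .lo = 0xffffffff, .chained = true };

        sw_increment(&c);
        printf("lo=%u hi=%u ovf_lo=%d ovf_hi=%d\n",
               c.lo, c.hi, c.overflow_lo, c.overflow_hi);
        return 0;
    }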
+diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
+index 98c7360d9fb7..17920d1b350a 100644
+--- a/virt/kvm/arm/vgic/vgic-its.c
++++ b/virt/kvm/arm/vgic/vgic-its.c
+@@ -2475,7 +2475,8 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
+ target_addr = (u32)(val >> KVM_ITS_CTE_RDBASE_SHIFT);
+ coll_id = val & KVM_ITS_CTE_ICID_MASK;
+
+- if (target_addr >= atomic_read(&kvm->online_vcpus))
++ if (target_addr != COLLECTION_NOT_MAPPED &&
++ target_addr >= atomic_read(&kvm->online_vcpus))
+ return -EINVAL;
+
+ collection = find_collection(its, coll_id);