diff options
author | Mike Pagano <mpagano@gentoo.org> | 2019-05-04 14:29:38 -0400 |
---|---|---|
committer | Mike Pagano <mpagano@gentoo.org> | 2019-05-04 14:29:38 -0400 |
commit | dc81aa26ea1bd832413eabc76bcff4c1421e0b2c (patch) | |
tree | 5333cf573935d4599f02b96ec0eeb80fb3907585 | |
parent | Linux patch 5.0.11 (diff) | |
download | linux-patches-dc81aa26.tar.gz linux-patches-dc81aa26.tar.bz2 linux-patches-dc81aa26.zip |
Linux patch 5.0.12 (tag: 5.0-13)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r-- | 0000_README | 4 | ||||
-rw-r--r-- | 1011_linux-5.0.12.patch | 3398 |
2 files changed, 3402 insertions, 0 deletions
diff --git a/0000_README b/0000_README index 4dfa4865..3b63726c 100644 --- a/0000_README +++ b/0000_README @@ -87,6 +87,10 @@ Patch: 1010_linux-5.0.11.patch From: http://www.kernel.org Desc: Linux 5.0.11 +Patch: 1011_linux-5.0.12.patch +From: http://www.kernel.org +Desc: Linux 5.0.12 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1011_linux-5.0.12.patch b/1011_linux-5.0.12.patch new file mode 100644 index 00000000..f1fc8ab5 --- /dev/null +++ b/1011_linux-5.0.12.patch @@ -0,0 +1,3398 @@ +diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801 +index d1ee484a787d..ee9984f35868 100644 +--- a/Documentation/i2c/busses/i2c-i801 ++++ b/Documentation/i2c/busses/i2c-i801 +@@ -36,6 +36,7 @@ Supported adapters: + * Intel Cannon Lake (PCH) + * Intel Cedar Fork (PCH) + * Intel Ice Lake (PCH) ++ * Intel Comet Lake (PCH) + Datasheets: Publicly available at the Intel website + + On Intel Patsburg and later chipsets, both the normal host SMBus controller +diff --git a/Makefile b/Makefile +index c3daaefa979c..fd044f594bbf 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 0 +-SUBLEVEL = 11 ++SUBLEVEL = 12 + EXTRAVERSION = + NAME = Shy Crocodile + +@@ -31,7 +31,7 @@ _all: + # descending is started. They are now explicitly listed as the + # prepare rule. 
+ +-ifneq ($(sub-make-done),1) ++ifneq ($(sub_make_done),1) + + # Do not use make's built-in rules and variables + # (this increases performance and avoids hard-to-debug behaviour) +@@ -159,6 +159,8 @@ need-sub-make := 1 + $(lastword $(MAKEFILE_LIST)): ; + endif + ++export sub_make_done := 1 ++ + ifeq ($(need-sub-make),1) + + PHONY += $(MAKECMDGOALS) sub-make +@@ -168,12 +170,12 @@ $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make + + # Invoke a second make in the output directory, passing relevant variables + sub-make: +- $(Q)$(MAKE) sub-make-done=1 \ ++ $(Q)$(MAKE) \ + $(if $(KBUILD_OUTPUT),-C $(KBUILD_OUTPUT) KBUILD_SRC=$(CURDIR)) \ + -f $(CURDIR)/Makefile $(filter-out _all sub-make,$(MAKECMDGOALS)) + + endif # need-sub-make +-endif # sub-make-done ++endif # sub_make_done + + # We process the rest of the Makefile if this is the final invocation of make + ifeq ($(need-sub-make),) +diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig +index 26524b75970a..e5d56d9b712c 100644 +--- a/arch/arm/Kconfig ++++ b/arch/arm/Kconfig +@@ -593,6 +593,7 @@ config ARCH_DAVINCI + select HAVE_IDE + select PM_GENERIC_DOMAINS if PM + select PM_GENERIC_DOMAINS_OF if PM && OF ++ select REGMAP_MMIO + select RESET_CONTROLLER + select USE_OF + select ZONE_DMA +diff --git a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts +index 5641d162dfdb..28e7513ce617 100644 +--- a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts ++++ b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts +@@ -93,7 +93,7 @@ + }; + + &hdmi { +- hpd-gpios = <&gpio 46 GPIO_ACTIVE_LOW>; ++ hpd-gpios = <&gpio 46 GPIO_ACTIVE_HIGH>; + }; + + &pwm { +diff --git a/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi b/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi +index 1d1b4bd0670f..a4217f564a53 100644 +--- a/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi ++++ b/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi +@@ -264,7 +264,7 @@ + pinctrl-2 = <&pinctrl_usdhc3_200mhz>; + vmcc-supply = <®_sd3_vmmc>; + cd-gpios = 
<&gpio1 1 GPIO_ACTIVE_LOW>; +- bus-witdh = <4>; ++ bus-width = <4>; + no-1-8-v; + status = "okay"; + }; +@@ -275,7 +275,7 @@ + pinctrl-1 = <&pinctrl_usdhc4_100mhz>; + pinctrl-2 = <&pinctrl_usdhc4_200mhz>; + vmcc-supply = <®_sd4_vmmc>; +- bus-witdh = <8>; ++ bus-width = <8>; + no-1-8-v; + non-removable; + status = "okay"; +diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi +index 1b50b01e9bac..65d03c5d409b 100644 +--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi ++++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi +@@ -90,6 +90,7 @@ + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_enet>; + phy-mode = "rgmii"; ++ phy-reset-duration = <10>; /* in msecs */ + phy-reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>; + phy-supply = <&vdd_eth_io_reg>; + status = "disabled"; +diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h +index 3a875fc1b63c..cee06509f00a 100644 +--- a/arch/arm/include/asm/kvm_mmu.h ++++ b/arch/arm/include/asm/kvm_mmu.h +@@ -381,6 +381,17 @@ static inline int kvm_read_guest_lock(struct kvm *kvm, + return ret; + } + ++static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa, ++ const void *data, unsigned long len) ++{ ++ int srcu_idx = srcu_read_lock(&kvm->srcu); ++ int ret = kvm_write_guest(kvm, gpa, data, len); ++ ++ srcu_read_unlock(&kvm->srcu, srcu_idx); ++ ++ return ret; ++} ++ + static inline void *kvm_get_hyp_vector(void) + { + switch(read_cpuid_part()) { +diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h +index de2089501b8b..9e11dce55e06 100644 +--- a/arch/arm/include/asm/stage2_pgtable.h ++++ b/arch/arm/include/asm/stage2_pgtable.h +@@ -75,6 +75,8 @@ static inline bool kvm_stage2_has_pud(struct kvm *kvm) + + #define S2_PMD_MASK PMD_MASK + #define S2_PMD_SIZE PMD_SIZE ++#define S2_PUD_MASK PUD_MASK ++#define S2_PUD_SIZE PUD_SIZE + + static inline bool kvm_stage2_has_pmd(struct kvm *kvm) + { +diff --git 
a/arch/arm/mach-imx/mach-imx51.c b/arch/arm/mach-imx/mach-imx51.c +index c7169c2f94c4..08c7892866c2 100644 +--- a/arch/arm/mach-imx/mach-imx51.c ++++ b/arch/arm/mach-imx/mach-imx51.c +@@ -59,6 +59,7 @@ static void __init imx51_m4if_setup(void) + return; + + m4if_base = of_iomap(np, 0); ++ of_node_put(np); + if (!m4if_base) { + pr_err("Unable to map M4IF registers\n"); + return; +diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi +index b2f606e286ce..327d12097643 100644 +--- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi ++++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi +@@ -2,7 +2,7 @@ + /* + * Device Tree Source for the R-Car E3 (R8A77990) SoC + * +- * Copyright (C) 2018 Renesas Electronics Corp. ++ * Copyright (C) 2018-2019 Renesas Electronics Corp. + */ + + #include <dt-bindings/clock/r8a77990-cpg-mssr.h> +@@ -1040,9 +1040,8 @@ + <&cpg CPG_CORE R8A77990_CLK_S3D1C>, + <&scif_clk>; + clock-names = "fck", "brg_int", "scif_clk"; +- dmas = <&dmac1 0x5b>, <&dmac1 0x5a>, +- <&dmac2 0x5b>, <&dmac2 0x5a>; +- dma-names = "tx", "rx", "tx", "rx"; ++ dmas = <&dmac0 0x5b>, <&dmac0 0x5a>; ++ dma-names = "tx", "rx"; + power-domains = <&sysc R8A77990_PD_ALWAYS_ON>; + resets = <&cpg 202>; + status = "disabled"; +diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h +index 8af4b1befa42..c246effd1b67 100644 +--- a/arch/arm64/include/asm/kvm_mmu.h ++++ b/arch/arm64/include/asm/kvm_mmu.h +@@ -444,6 +444,17 @@ static inline int kvm_read_guest_lock(struct kvm *kvm, + return ret; + } + ++static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa, ++ const void *data, unsigned long len) ++{ ++ int srcu_idx = srcu_read_lock(&kvm->srcu); ++ int ret = kvm_write_guest(kvm, gpa, data, len); ++ ++ srcu_read_unlock(&kvm->srcu, srcu_idx); ++ ++ return ret; ++} ++ + #ifdef CONFIG_KVM_INDIRECT_VECTORS + /* + * EL2 vectors can be mapped and rerouted in a number of ways, +diff --git a/arch/arm64/kvm/reset.c 
b/arch/arm64/kvm/reset.c +index f16a5f8ff2b4..e2a0500cd7a2 100644 +--- a/arch/arm64/kvm/reset.c ++++ b/arch/arm64/kvm/reset.c +@@ -123,6 +123,9 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) + int ret = -EINVAL; + bool loaded; + ++ /* Reset PMU outside of the non-preemptible section */ ++ kvm_pmu_vcpu_reset(vcpu); ++ + preempt_disable(); + loaded = (vcpu->cpu != -1); + if (loaded) +@@ -170,9 +173,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) + vcpu->arch.reset_state.reset = false; + } + +- /* Reset PMU */ +- kvm_pmu_vcpu_reset(vcpu); +- + /* Default workaround setup is enabled (if supported) */ + if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL) + vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG; +diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h +index 7d22a474a040..f74639a05f0f 100644 +--- a/arch/s390/include/asm/elf.h ++++ b/arch/s390/include/asm/elf.h +@@ -252,11 +252,14 @@ do { \ + + /* + * Cache aliasing on the latest machines calls for a mapping granularity +- * of 512KB. For 64-bit processes use a 512KB alignment and a randomization +- * of up to 1GB. For 31-bit processes the virtual address space is limited, +- * use no alignment and limit the randomization to 8MB. ++ * of 512KB for the anonymous mapping base. For 64-bit processes use a ++ * 512KB alignment and a randomization of up to 1GB. For 31-bit processes ++ * the virtual address space is limited, use no alignment and limit the ++ * randomization to 8MB. ++ * For the additional randomization of the program break use 32MB for ++ * 64-bit and 8MB for 31-bit. + */ +-#define BRK_RND_MASK (is_compat_task() ? 0x7ffUL : 0x3ffffUL) ++#define BRK_RND_MASK (is_compat_task() ? 0x7ffUL : 0x1fffUL) + #define MMAP_RND_MASK (is_compat_task() ? 0x7ffUL : 0x3ff80UL) + #define MMAP_ALIGN_MASK (is_compat_task() ? 
0 : 0x7fUL) + #define STACK_RND_MASK MMAP_RND_MASK +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h +index 71d763ad2637..9f2d890733a9 100644 +--- a/arch/x86/include/asm/kvm_host.h ++++ b/arch/x86/include/asm/kvm_host.h +@@ -1198,6 +1198,8 @@ struct kvm_x86_ops { + int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu, + uint16_t *vmcs_version); + uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu); ++ ++ bool (*need_emulation_on_page_fault)(struct kvm_vcpu *vcpu); + }; + + struct kvm_arch_async_pf { +diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c +index 89d20ed1d2e8..371c669696d7 100644 +--- a/arch/x86/kvm/hyperv.c ++++ b/arch/x86/kvm/hyperv.c +@@ -526,7 +526,9 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config, + new_config.enable = 0; + stimer->config.as_uint64 = new_config.as_uint64; + +- stimer_mark_pending(stimer, false); ++ if (stimer->config.enable) ++ stimer_mark_pending(stimer, false); ++ + return 0; + } + +@@ -542,7 +544,10 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count, + stimer->config.enable = 0; + else if (stimer->config.auto_enable) + stimer->config.enable = 1; +- stimer_mark_pending(stimer, false); ++ ++ if (stimer->config.enable) ++ stimer_mark_pending(stimer, false); ++ + return 0; + } + +diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c +index 9ab33cab9486..77dbb57412cc 100644 +--- a/arch/x86/kvm/mmu.c ++++ b/arch/x86/kvm/mmu.c +@@ -4915,11 +4915,15 @@ static union kvm_mmu_role + kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty, + bool execonly) + { +- union kvm_mmu_role role; ++ union kvm_mmu_role role = {0}; ++ union kvm_mmu_page_role root_base = vcpu->arch.root_mmu.mmu_role.base; + +- /* Base role is inherited from root_mmu */ +- role.base.word = vcpu->arch.root_mmu.mmu_role.base.word; +- role.ext = kvm_calc_mmu_role_ext(vcpu); ++ /* Legacy paging and SMM flags are inherited from root_mmu */ ++ role.base.smm = 
root_base.smm; ++ role.base.nxe = root_base.nxe; ++ role.base.cr0_wp = root_base.cr0_wp; ++ role.base.smep_andnot_wp = root_base.smep_andnot_wp; ++ role.base.smap_andnot_wp = root_base.smap_andnot_wp; + + role.base.level = PT64_ROOT_4LEVEL; + role.base.direct = false; +@@ -4927,6 +4931,7 @@ kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty, + role.base.guest_mode = true; + role.base.access = ACC_ALL; + ++ role.ext = kvm_calc_mmu_role_ext(vcpu); + role.ext.execonly = execonly; + + return role; +@@ -5390,10 +5395,12 @@ emulate: + * This can happen if a guest gets a page-fault on data access but the HW + * table walker is not able to read the instruction page (e.g instruction + * page is not present in memory). In those cases we simply restart the +- * guest. ++ * guest, with the exception of AMD Erratum 1096 which is unrecoverable. + */ +- if (unlikely(insn && !insn_len)) +- return 1; ++ if (unlikely(insn && !insn_len)) { ++ if (!kvm_x86_ops->need_emulation_on_page_fault(vcpu)) ++ return 1; ++ } + + er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len); + +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c +index 516c1de03d47..e544cec812f9 100644 +--- a/arch/x86/kvm/svm.c ++++ b/arch/x86/kvm/svm.c +@@ -7114,6 +7114,36 @@ static int nested_enable_evmcs(struct kvm_vcpu *vcpu, + return -ENODEV; + } + ++static bool svm_need_emulation_on_page_fault(struct kvm_vcpu *vcpu) ++{ ++ bool is_user, smap; ++ ++ is_user = svm_get_cpl(vcpu) == 3; ++ smap = !kvm_read_cr4_bits(vcpu, X86_CR4_SMAP); ++ ++ /* ++ * Detect and workaround Errata 1096 Fam_17h_00_0Fh ++ * ++ * In non SEV guest, hypervisor will be able to read the guest ++ * memory to decode the instruction pointer when insn_len is zero ++ * so we return true to indicate that decoding is possible. 
++ * ++ * But in the SEV guest, the guest memory is encrypted with the ++ * guest specific key and hypervisor will not be able to decode the ++ * instruction pointer so we will not able to workaround it. Lets ++ * print the error and request to kill the guest. ++ */ ++ if (is_user && smap) { ++ if (!sev_guest(vcpu->kvm)) ++ return true; ++ ++ pr_err_ratelimited("KVM: Guest triggered AMD Erratum 1096\n"); ++ kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); ++ } ++ ++ return false; ++} ++ + static struct kvm_x86_ops svm_x86_ops __ro_after_init = { + .cpu_has_kvm_support = has_svm, + .disabled_by_bios = is_disabled, +@@ -7247,6 +7277,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { + + .nested_enable_evmcs = nested_enable_evmcs, + .nested_get_evmcs_version = nested_get_evmcs_version, ++ ++ .need_emulation_on_page_fault = svm_need_emulation_on_page_fault, + }; + + static int __init svm_init(void) +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c +index 34499081022c..e7fe8c692362 100644 +--- a/arch/x86/kvm/vmx/vmx.c ++++ b/arch/x86/kvm/vmx/vmx.c +@@ -7526,6 +7526,11 @@ static int enable_smi_window(struct kvm_vcpu *vcpu) + return 0; + } + ++static bool vmx_need_emulation_on_page_fault(struct kvm_vcpu *vcpu) ++{ ++ return 0; ++} ++ + static __init int hardware_setup(void) + { + unsigned long host_bndcfgs; +@@ -7828,6 +7833,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { + .set_nested_state = NULL, + .get_vmcs12_pages = NULL, + .nested_enable_evmcs = NULL, ++ .need_emulation_on_page_fault = vmx_need_emulation_on_page_fault, + }; + + static void vmx_cleanup_l1d_flush(void) +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 2db58067bb59..8c9fb6453b2f 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -1127,7 +1127,7 @@ static u32 msrs_to_save[] = { + #endif + MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA, + MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX, +- MSR_IA32_SPEC_CTRL, MSR_IA32_ARCH_CAPABILITIES, 
++ MSR_IA32_SPEC_CTRL, + MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH, + MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK, + MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B, +@@ -1160,6 +1160,7 @@ static u32 emulated_msrs[] = { + + MSR_IA32_TSC_ADJUST, + MSR_IA32_TSCDEADLINE, ++ MSR_IA32_ARCH_CAPABILITIES, + MSR_IA32_MISC_ENABLE, + MSR_IA32_MCG_STATUS, + MSR_IA32_MCG_CTL, +diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c +index db3165714521..dc726e07d8ba 100644 +--- a/arch/x86/mm/mmap.c ++++ b/arch/x86/mm/mmap.c +@@ -230,7 +230,7 @@ bool mmap_address_hint_valid(unsigned long addr, unsigned long len) + /* Can we access it for direct reading/writing? Must be RAM: */ + int valid_phys_addr_range(phys_addr_t addr, size_t count) + { +- return addr + count <= __pa(high_memory); ++ return addr + count - 1 <= __pa(high_memory - 1); + } + + /* Can we access it through mmap? Must be a valid physical address: */ +diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c +index d10105825d57..47d097946872 100644 +--- a/arch/x86/realmode/init.c ++++ b/arch/x86/realmode/init.c +@@ -20,8 +20,6 @@ void __init set_real_mode_mem(phys_addr_t mem, size_t size) + void *base = __va(mem); + + real_mode_header = (struct real_mode_header *) base; +- printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n", +- base, (unsigned long long)mem, size); + } + + void __init reserve_real_mode(void) +diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c +index 4424997ecf30..e10fec99a182 100644 +--- a/drivers/acpi/acpica/evgpe.c ++++ b/drivers/acpi/acpica/evgpe.c +@@ -81,12 +81,8 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) + + ACPI_FUNCTION_TRACE(ev_enable_gpe); + +- /* Clear the GPE status */ +- status = acpi_hw_clear_gpe(gpe_event_info); +- if (ACPI_FAILURE(status)) +- return_ACPI_STATUS(status); +- + /* Enable the requested GPE */ ++ + status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE); + 
return_ACPI_STATUS(status); + } +diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c +index b3ed8f9953a8..173e6f2dd9af 100644 +--- a/drivers/ata/libata-zpodd.c ++++ b/drivers/ata/libata-zpodd.c +@@ -52,38 +52,52 @@ static int eject_tray(struct ata_device *dev) + /* Per the spec, only slot type and drawer type ODD can be supported */ + static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev) + { +- char buf[16]; ++ char *buf; + unsigned int ret; +- struct rm_feature_desc *desc = (void *)(buf + 8); ++ struct rm_feature_desc *desc; + struct ata_taskfile tf; + static const char cdb[] = { GPCMD_GET_CONFIGURATION, + 2, /* only 1 feature descriptor requested */ + 0, 3, /* 3, removable medium feature */ + 0, 0, 0,/* reserved */ +- 0, sizeof(buf), ++ 0, 16, + 0, 0, 0, + }; + ++ buf = kzalloc(16, GFP_KERNEL); ++ if (!buf) ++ return ODD_MECH_TYPE_UNSUPPORTED; ++ desc = (void *)(buf + 8); ++ + ata_tf_init(dev, &tf); + tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + tf.command = ATA_CMD_PACKET; + tf.protocol = ATAPI_PROT_PIO; +- tf.lbam = sizeof(buf); ++ tf.lbam = 16; + + ret = ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE, +- buf, sizeof(buf), 0); +- if (ret) ++ buf, 16, 0); ++ if (ret) { ++ kfree(buf); + return ODD_MECH_TYPE_UNSUPPORTED; ++ } + +- if (be16_to_cpu(desc->feature_code) != 3) ++ if (be16_to_cpu(desc->feature_code) != 3) { ++ kfree(buf); + return ODD_MECH_TYPE_UNSUPPORTED; ++ } + +- if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1) ++ if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1) { ++ kfree(buf); + return ODD_MECH_TYPE_SLOT; +- else if (desc->mech_type == 1 && desc->load == 0 && desc->eject == 1) ++ } else if (desc->mech_type == 1 && desc->load == 0 && ++ desc->eject == 1) { ++ kfree(buf); + return ODD_MECH_TYPE_DRAWER; +- else ++ } else { ++ kfree(buf); + return ODD_MECH_TYPE_UNSUPPORTED; ++ } + } + + /* Test if ODD is zero power ready by sense code */ +diff --git a/drivers/gpio/gpio-aspeed.c 
b/drivers/gpio/gpio-aspeed.c +index 854bce4fb9e7..217507002dbc 100644 +--- a/drivers/gpio/gpio-aspeed.c ++++ b/drivers/gpio/gpio-aspeed.c +@@ -1224,6 +1224,8 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev) + + gpio->offset_timer = + devm_kzalloc(&pdev->dev, gpio->chip.ngpio, GFP_KERNEL); ++ if (!gpio->offset_timer) ++ return -ENOMEM; + + return aspeed_gpio_setup_irqs(gpio, pdev); + } +diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c +index a1dd2f1c0d02..13a402ede07a 100644 +--- a/drivers/gpio/gpiolib-of.c ++++ b/drivers/gpio/gpiolib-of.c +@@ -119,7 +119,8 @@ static void of_gpio_flags_quirks(struct device_node *np, + * to determine if the flags should have inverted semantics. + */ + if (IS_ENABLED(CONFIG_SPI_MASTER) && +- of_property_read_bool(np, "cs-gpios")) { ++ of_property_read_bool(np, "cs-gpios") && ++ !strcmp(propname, "cs-gpios")) { + struct device_node *child; + u32 cs; + int ret; +@@ -141,16 +142,16 @@ static void of_gpio_flags_quirks(struct device_node *np, + * conflict and the "spi-cs-high" flag will + * take precedence. 
+ */ +- if (of_property_read_bool(np, "spi-cs-high")) { ++ if (of_property_read_bool(child, "spi-cs-high")) { + if (*flags & OF_GPIO_ACTIVE_LOW) { + pr_warn("%s GPIO handle specifies active low - ignored\n", +- of_node_full_name(np)); ++ of_node_full_name(child)); + *flags &= ~OF_GPIO_ACTIVE_LOW; + } + } else { + if (!(*flags & OF_GPIO_ACTIVE_LOW)) + pr_info("%s enforce active low on chipselect handle\n", +- of_node_full_name(np)); ++ of_node_full_name(child)); + *flags |= OF_GPIO_ACTIVE_LOW; + } + break; +@@ -711,7 +712,13 @@ int of_gpiochip_add(struct gpio_chip *chip) + + of_node_get(chip->of_node); + +- return of_gpiochip_scan_gpios(chip); ++ status = of_gpiochip_scan_gpios(chip); ++ if (status) { ++ of_node_put(chip->of_node); ++ gpiochip_remove_pin_ranges(chip); ++ } ++ ++ return status; + } + + void of_gpiochip_remove(struct gpio_chip *chip) +diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c +index 12e5e2be7890..7a59b8b3ed5a 100644 +--- a/drivers/gpu/drm/drm_drv.c ++++ b/drivers/gpu/drm/drm_drv.c +@@ -381,11 +381,7 @@ void drm_dev_unplug(struct drm_device *dev) + synchronize_srcu(&drm_unplug_srcu); + + drm_dev_unregister(dev); +- +- mutex_lock(&drm_global_mutex); +- if (dev->open_count == 0) +- drm_dev_put(dev); +- mutex_unlock(&drm_global_mutex); ++ drm_dev_put(dev); + } + EXPORT_SYMBOL(drm_dev_unplug); + +diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c +index 46f48f245eb5..3f20f598cd7c 100644 +--- a/drivers/gpu/drm/drm_file.c ++++ b/drivers/gpu/drm/drm_file.c +@@ -479,11 +479,9 @@ int drm_release(struct inode *inode, struct file *filp) + + drm_file_free(file_priv); + +- if (!--dev->open_count) { ++ if (!--dev->open_count) + drm_lastclose(dev); +- if (drm_dev_is_unplugged(dev)) +- drm_put_dev(dev); +- } ++ + mutex_unlock(&drm_global_mutex); + + drm_minor_release(minor); +diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c +index dcd1df5322e8..21c6016ccba5 100644 +--- 
a/drivers/gpu/drm/i915/intel_dp.c ++++ b/drivers/gpu/drm/i915/intel_dp.c +@@ -1871,6 +1871,9 @@ static bool intel_dp_dsc_compute_config(struct intel_dp *intel_dp, + u8 dsc_max_bpc; + int pipe_bpp; + ++ pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) && ++ intel_dp_supports_fec(intel_dp, pipe_config); ++ + if (!intel_dp_supports_dsc(intel_dp, pipe_config)) + return false; + +@@ -2097,9 +2100,6 @@ intel_dp_compute_config(struct intel_encoder *encoder, + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) + return false; + +- pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) && +- intel_dp_supports_fec(intel_dp, pipe_config); +- + if (!intel_dp_compute_link_config(encoder, pipe_config, conn_state)) + return false; + +diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c +index 12ff47b13668..a13704ab5d11 100644 +--- a/drivers/gpu/drm/meson/meson_drv.c ++++ b/drivers/gpu/drm/meson/meson_drv.c +@@ -317,12 +317,14 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) + + ret = drm_dev_register(drm, 0); + if (ret) +- goto free_drm; ++ goto uninstall_irq; + + drm_fbdev_generic_setup(drm, 32); + + return 0; + ++uninstall_irq: ++ drm_irq_uninstall(drm); + free_drm: + drm_dev_put(drm); + +@@ -336,8 +338,8 @@ static int meson_drv_bind(struct device *dev) + + static void meson_drv_unbind(struct device *dev) + { +- struct drm_device *drm = dev_get_drvdata(dev); +- struct meson_drm *priv = drm->dev_private; ++ struct meson_drm *priv = dev_get_drvdata(dev); ++ struct drm_device *drm = priv->drm; + + if (priv->canvas) { + meson_canvas_free(priv->canvas, priv->canvas_id_osd1); +@@ -347,6 +349,7 @@ static void meson_drv_unbind(struct device *dev) + } + + drm_dev_unregister(drm); ++ drm_irq_uninstall(drm); + drm_kms_helper_poll_fini(drm); + drm_mode_config_cleanup(drm); + drm_dev_put(drm); +diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c +index 922a48d5a483..c7c612579270 100644 +--- 
a/drivers/gpu/drm/tegra/hub.c ++++ b/drivers/gpu/drm/tegra/hub.c +@@ -378,14 +378,16 @@ static int tegra_shared_plane_atomic_check(struct drm_plane *plane, + static void tegra_shared_plane_atomic_disable(struct drm_plane *plane, + struct drm_plane_state *old_state) + { +- struct tegra_dc *dc = to_tegra_dc(old_state->crtc); + struct tegra_plane *p = to_tegra_plane(plane); ++ struct tegra_dc *dc; + u32 value; + + /* rien ne va plus */ + if (!old_state || !old_state->crtc) + return; + ++ dc = to_tegra_dc(old_state->crtc); ++ + /* + * XXX Legacy helpers seem to sometimes call ->atomic_disable() even + * on planes that are already disabled. Make sure we fallback to the +diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig +index f2c681971201..f8979abb9a19 100644 +--- a/drivers/i2c/busses/Kconfig ++++ b/drivers/i2c/busses/Kconfig +@@ -131,6 +131,7 @@ config I2C_I801 + Cannon Lake (PCH) + Cedar Fork (PCH) + Ice Lake (PCH) ++ Comet Lake (PCH) + + This driver can also be built as a module. If so, the module + will be called i2c-i801. 
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c +index c91e145ef5a5..679c6c41f64b 100644 +--- a/drivers/i2c/busses/i2c-i801.c ++++ b/drivers/i2c/busses/i2c-i801.c +@@ -71,6 +71,7 @@ + * Cannon Lake-LP (PCH) 0x9da3 32 hard yes yes yes + * Cedar Fork (PCH) 0x18df 32 hard yes yes yes + * Ice Lake-LP (PCH) 0x34a3 32 hard yes yes yes ++ * Comet Lake (PCH) 0x02a3 32 hard yes yes yes + * + * Features supported by this driver: + * Software PEC no +@@ -240,6 +241,7 @@ + #define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS 0xa223 + #define PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS 0xa2a3 + #define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS 0xa323 ++#define PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS 0x02a3 + + struct i801_mux_config { + char *gpio_chip; +@@ -1038,6 +1040,7 @@ static const struct pci_device_id i801_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS) }, + { 0, } + }; + +@@ -1534,6 +1537,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) + case PCI_DEVICE_ID_INTEL_DNV_SMBUS: + case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS: + case PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS: ++ case PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS: + priv->features |= FEATURE_I2C_BLOCK_READ; + priv->features |= FEATURE_IRQ; + priv->features |= FEATURE_SMBUS_PEC; +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c +index e628ef23418f..55b3e4b9d5dc 100644 +--- a/drivers/iommu/amd_iommu.c ++++ b/drivers/iommu/amd_iommu.c +@@ -3166,21 +3166,24 @@ static void amd_iommu_get_resv_regions(struct device *dev, + return; + + list_for_each_entry(entry, &amd_iommu_unity_map, list) { ++ int type, prot = 0; + size_t length; +- int prot = 0; + + if (devid < entry->devid_start || devid > 
entry->devid_end) + continue; + ++ type = IOMMU_RESV_DIRECT; + length = entry->address_end - entry->address_start; + if (entry->prot & IOMMU_PROT_IR) + prot |= IOMMU_READ; + if (entry->prot & IOMMU_PROT_IW) + prot |= IOMMU_WRITE; ++ if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE) ++ /* Exclusion range */ ++ type = IOMMU_RESV_RESERVED; + + region = iommu_alloc_resv_region(entry->address_start, +- length, prot, +- IOMMU_RESV_DIRECT); ++ length, prot, type); + if (!region) { + pr_err("Out of memory allocating dm-regions for %s\n", + dev_name(dev)); +diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c +index 66123b911ec8..84fa5b22371e 100644 +--- a/drivers/iommu/amd_iommu_init.c ++++ b/drivers/iommu/amd_iommu_init.c +@@ -2013,6 +2013,9 @@ static int __init init_unity_map_range(struct ivmd_header *m) + if (e == NULL) + return -ENOMEM; + ++ if (m->flags & IVMD_FLAG_EXCL_RANGE) ++ init_exclusion_range(m); ++ + switch (m->type) { + default: + kfree(e); +@@ -2059,9 +2062,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table) + + while (p < end) { + m = (struct ivmd_header *)p; +- if (m->flags & IVMD_FLAG_EXCL_RANGE) +- init_exclusion_range(m); +- else if (m->flags & IVMD_FLAG_UNITY_MAP) ++ if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE)) + init_unity_map_range(m); + + p += m->length; +diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h +index eae0741f72dc..87965e4d9647 100644 +--- a/drivers/iommu/amd_iommu_types.h ++++ b/drivers/iommu/amd_iommu_types.h +@@ -374,6 +374,8 @@ + #define IOMMU_PROT_IR 0x01 + #define IOMMU_PROT_IW 0x02 + ++#define IOMMU_UNITY_MAP_FLAG_EXCL_RANGE (1 << 2) ++ + /* IOMMU capabilities */ + #define IOMMU_CAP_IOTLB 24 + #define IOMMU_CAP_NPCACHE 26 +diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c +index 7fea18b0c15d..7cb4d685a1f1 100644 +--- a/drivers/leds/leds-pca9532.c ++++ b/drivers/leds/leds-pca9532.c +@@ -513,6 +513,7 @@ static int 
pca9532_probe(struct i2c_client *client, + const struct i2c_device_id *id) + { + int devid; ++ const struct of_device_id *of_id; + struct pca9532_data *data = i2c_get_clientdata(client); + struct pca9532_platform_data *pca9532_pdata = + dev_get_platdata(&client->dev); +@@ -528,8 +529,11 @@ static int pca9532_probe(struct i2c_client *client, + dev_err(&client->dev, "no platform data\n"); + return -EINVAL; + } +- devid = (int)(uintptr_t)of_match_device( +- of_pca9532_leds_match, &client->dev)->data; ++ of_id = of_match_device(of_pca9532_leds_match, ++ &client->dev); ++ if (unlikely(!of_id)) ++ return -EINVAL; ++ devid = (int)(uintptr_t) of_id->data; + } else { + devid = id->driver_data; + } +diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c +index 3dd3ed46d473..136f86a1627d 100644 +--- a/drivers/leds/trigger/ledtrig-netdev.c ++++ b/drivers/leds/trigger/ledtrig-netdev.c +@@ -122,7 +122,8 @@ static ssize_t device_name_store(struct device *dev, + trigger_data->net_dev = NULL; + } + +- strncpy(trigger_data->device_name, buf, size); ++ memcpy(trigger_data->device_name, buf, size); ++ trigger_data->device_name[size] = 0; + if (size > 0 && trigger_data->device_name[size - 1] == '\n') + trigger_data->device_name[size - 1] = 0; + +@@ -301,11 +302,11 @@ static int netdev_trig_notify(struct notifier_block *nb, + container_of(nb, struct led_netdev_data, notifier); + + if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE +- && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER +- && evt != NETDEV_CHANGENAME) ++ && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER) + return NOTIFY_DONE; + +- if (strcmp(dev->name, trigger_data->device_name)) ++ if (!(dev == trigger_data->net_dev || ++ (evt == NETDEV_REGISTER && !strcmp(dev->name, trigger_data->device_name)))) + return NOTIFY_DONE; + + cancel_delayed_work_sync(&trigger_data->work); +@@ -320,12 +321,9 @@ static int netdev_trig_notify(struct notifier_block *nb, + dev_hold(dev); + 
trigger_data->net_dev = dev; + break; +- case NETDEV_CHANGENAME: + case NETDEV_UNREGISTER: +- if (trigger_data->net_dev) { +- dev_put(trigger_data->net_dev); +- trigger_data->net_dev = NULL; +- } ++ dev_put(trigger_data->net_dev); ++ trigger_data->net_dev = NULL; + break; + case NETDEV_UP: + case NETDEV_CHANGE: +diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c +index 2b2882615e8b..6cbe515bfdeb 100644 +--- a/drivers/net/ethernet/cadence/macb_main.c ++++ b/drivers/net/ethernet/cadence/macb_main.c +@@ -3318,14 +3318,20 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, + *hclk = devm_clk_get(&pdev->dev, "hclk"); + } + +- if (IS_ERR(*pclk)) { ++ if (IS_ERR_OR_NULL(*pclk)) { + err = PTR_ERR(*pclk); ++ if (!err) ++ err = -ENODEV; ++ + dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err); + return err; + } + +- if (IS_ERR(*hclk)) { ++ if (IS_ERR_OR_NULL(*hclk)) { + err = PTR_ERR(*hclk); ++ if (!err) ++ err = -ENODEV; ++ + dev_err(&pdev->dev, "failed to get hclk (%u)\n", err); + return err; + } +diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c +index 3baabdc89726..90b62c1412c8 100644 +--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c ++++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c +@@ -3160,6 +3160,7 @@ static ssize_t ehea_probe_port(struct device *dev, + + if (ehea_add_adapter_mr(adapter)) { + pr_err("creating MR failed\n"); ++ of_node_put(eth_dn); + return -EIO; + } + +diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c +index bd6e9014bc74..b83b070a9eec 100644 +--- a/drivers/net/ethernet/micrel/ks8851.c ++++ b/drivers/net/ethernet/micrel/ks8851.c +@@ -535,9 +535,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) + /* set dma read address */ + ks8851_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI | 0x00); + +- /* start the packet dma process, and set auto-dequeue rx */ +- ks8851_wrreg16(ks, KS_RXQCR, +- ks->rc_rxqcr 
| RXQCR_SDA | RXQCR_ADRFE); ++ /* start DMA access */ ++ ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA); + + if (rxlen > 4) { + unsigned int rxalign; +@@ -568,7 +567,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) + } + } + +- ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); ++ /* end DMA access and dequeue packet */ ++ ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_RRXEF); + } + } + +@@ -785,6 +785,15 @@ static void ks8851_tx_work(struct work_struct *work) + static int ks8851_net_open(struct net_device *dev) + { + struct ks8851_net *ks = netdev_priv(dev); ++ int ret; ++ ++ ret = request_threaded_irq(dev->irq, NULL, ks8851_irq, ++ IRQF_TRIGGER_LOW | IRQF_ONESHOT, ++ dev->name, ks); ++ if (ret < 0) { ++ netdev_err(dev, "failed to get irq\n"); ++ return ret; ++ } + + /* lock the card, even if we may not actually be doing anything + * else at the moment */ +@@ -849,6 +858,7 @@ static int ks8851_net_open(struct net_device *dev) + netif_dbg(ks, ifup, ks->netdev, "network device up\n"); + + mutex_unlock(&ks->lock); ++ mii_check_link(&ks->mii); + return 0; + } + +@@ -899,6 +909,8 @@ static int ks8851_net_stop(struct net_device *dev) + dev_kfree_skb(txb); + } + ++ free_irq(dev->irq, ks); ++ + return 0; + } + +@@ -1508,6 +1520,7 @@ static int ks8851_probe(struct spi_device *spi) + + spi_set_drvdata(spi, ks); + ++ netif_carrier_off(ks->netdev); + ndev->if_port = IF_PORT_100BASET; + ndev->netdev_ops = &ks8851_netdev_ops; + ndev->irq = spi->irq; +@@ -1529,14 +1542,6 @@ static int ks8851_probe(struct spi_device *spi) + ks8851_read_selftest(ks); + ks8851_init_mac(ks); + +- ret = request_threaded_irq(spi->irq, NULL, ks8851_irq, +- IRQF_TRIGGER_LOW | IRQF_ONESHOT, +- ndev->name, ks); +- if (ret < 0) { +- dev_err(&spi->dev, "failed to get irq\n"); +- goto err_irq; +- } +- + ret = register_netdev(ndev); + if (ret) { + dev_err(&spi->dev, "failed to register network device\n"); +@@ -1549,14 +1554,10 @@ static int ks8851_probe(struct spi_device *spi) + + return 0; + +- + 
err_netdev: +- free_irq(ndev->irq, ks); +- +-err_irq: ++err_id: + if (gpio_is_valid(gpio)) + gpio_set_value(gpio, 0); +-err_id: + regulator_disable(ks->vdd_reg); + err_reg: + regulator_disable(ks->vdd_io); +@@ -1574,7 +1575,6 @@ static int ks8851_remove(struct spi_device *spi) + dev_info(&spi->dev, "remove\n"); + + unregister_netdev(priv->netdev); +- free_irq(spi->irq, priv); + if (gpio_is_valid(priv->gpio)) + gpio_set_value(priv->gpio, 0); + regulator_disable(priv->vdd_reg); +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +index 3b0adda7cc9c..a4cd6f2cfb86 100644 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +@@ -1048,6 +1048,8 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode) + + for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) { + skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE); ++ if (!skb) ++ break; + qlcnic_create_loopback_buff(skb->data, adapter->mac_addr); + skb_put(skb, QLCNIC_ILB_PKT_SIZE); + adapter->ahw->diag_cnt = 0; +diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +index c0c75c111abb..4d9bcb4d0378 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c ++++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +@@ -59,7 +59,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum) + + desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); + stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum, +- STMMAC_RING_MODE, 1, false, skb->len); ++ STMMAC_RING_MODE, 0, false, skb->len); + tx_q->tx_skbuff[entry] = NULL; + entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); + +@@ -79,7 +79,8 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum) + + desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); + stmmac_prepare_tx_desc(priv, desc, 0, len, csum, +- STMMAC_RING_MODE, 1, true, skb->len); ++ STMMAC_RING_MODE, 1, !skb_is_nonlinear(skb), ++ 
skb->len); + } else { + des2 = dma_map_single(priv->device, skb->data, + nopaged_len, DMA_TO_DEVICE); +@@ -91,7 +92,8 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum) + tx_q->tx_skbuff_dma[entry].is_jumbo = true; + desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); + stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum, +- STMMAC_RING_MODE, 1, true, skb->len); ++ STMMAC_RING_MODE, 0, !skb_is_nonlinear(skb), ++ skb->len); + } + + tx_q->cur_tx = entry; +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 1d8d6f2ddfd6..0bc3632880b5 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -3190,14 +3190,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) + stmmac_prepare_tx_desc(priv, first, 1, nopaged_len, + csum_insertion, priv->mode, 1, last_segment, + skb->len); +- +- /* The own bit must be the latest setting done when prepare the +- * descriptor and then barrier is needed to make sure that +- * all is coherent before granting the DMA engine. +- */ +- wmb(); ++ } else { ++ stmmac_set_tx_owner(priv, first); + } + ++ /* The own bit must be the latest setting done when prepare the ++ * descriptor and then barrier is needed to make sure that ++ * all is coherent before granting the DMA engine. 
++ */ ++ wmb(); ++ + netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); + + stmmac_enable_dma_transmission(priv, priv->ioaddr); +diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c +index 5174d318901e..0a920c5936b2 100644 +--- a/drivers/net/ethernet/ti/netcp_ethss.c ++++ b/drivers/net/ethernet/ti/netcp_ethss.c +@@ -3657,12 +3657,16 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, + + ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device, + gbe_dev->dma_chan_name, gbe_dev->tx_queue_id); +- if (ret) ++ if (ret) { ++ of_node_put(interfaces); + return ret; ++ } + + ret = netcp_txpipe_open(&gbe_dev->tx_pipe); +- if (ret) ++ if (ret) { ++ of_node_put(interfaces); + return ret; ++ } + + /* Create network interfaces */ + INIT_LIST_HEAD(&gbe_dev->gbe_intf_head); +diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +index 0789d8af7d72..1ef56edb3918 100644 +--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c ++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +@@ -1575,12 +1575,14 @@ static int axienet_probe(struct platform_device *pdev) + ret = of_address_to_resource(np, 0, &dmares); + if (ret) { + dev_err(&pdev->dev, "unable to get DMA resource\n"); ++ of_node_put(np); + goto free_netdev; + } + lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares); + if (IS_ERR(lp->dma_regs)) { + dev_err(&pdev->dev, "could not map DMA regs\n"); + ret = PTR_ERR(lp->dma_regs); ++ of_node_put(np); + goto free_netdev; + } + lp->rx_irq = irq_of_parse_and_map(np, 1); +diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c +index cd1d8faccca5..cd6b95e673a5 100644 +--- a/drivers/net/ieee802154/adf7242.c ++++ b/drivers/net/ieee802154/adf7242.c +@@ -1268,6 +1268,10 @@ static int adf7242_probe(struct spi_device *spi) + INIT_DELAYED_WORK(&lp->work, adf7242_rx_cal_work); + lp->wqueue = 
alloc_ordered_workqueue(dev_name(&spi->dev), + WQ_MEM_RECLAIM); ++ if (unlikely(!lp->wqueue)) { ++ ret = -ENOMEM; ++ goto err_hw_init; ++ } + + ret = adf7242_hw_init(lp); + if (ret) +diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c +index b6743f03dce0..3b88846de31b 100644 +--- a/drivers/net/ieee802154/mac802154_hwsim.c ++++ b/drivers/net/ieee802154/mac802154_hwsim.c +@@ -324,7 +324,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info) + goto out_err; + } + +- genlmsg_reply(skb, info); ++ res = genlmsg_reply(skb, info); + break; + } + +diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c +index 24c7f149f3e6..e11057892f07 100644 +--- a/drivers/net/phy/dp83822.c ++++ b/drivers/net/phy/dp83822.c +@@ -23,6 +23,8 @@ + #include <linux/netdevice.h> + + #define DP83822_PHY_ID 0x2000a240 ++#define DP83825I_PHY_ID 0x2000a150 ++ + #define DP83822_DEVADDR 0x1f + + #define MII_DP83822_PHYSCR 0x11 +@@ -312,26 +314,30 @@ static int dp83822_resume(struct phy_device *phydev) + return 0; + } + ++#define DP83822_PHY_DRIVER(_id, _name) \ ++ { \ ++ PHY_ID_MATCH_MODEL(_id), \ ++ .name = (_name), \ ++ .features = PHY_BASIC_FEATURES, \ ++ .soft_reset = dp83822_phy_reset, \ ++ .config_init = dp83822_config_init, \ ++ .get_wol = dp83822_get_wol, \ ++ .set_wol = dp83822_set_wol, \ ++ .ack_interrupt = dp83822_ack_interrupt, \ ++ .config_intr = dp83822_config_intr, \ ++ .suspend = dp83822_suspend, \ ++ .resume = dp83822_resume, \ ++ } ++ + static struct phy_driver dp83822_driver[] = { +- { +- .phy_id = DP83822_PHY_ID, +- .phy_id_mask = 0xfffffff0, +- .name = "TI DP83822", +- .features = PHY_BASIC_FEATURES, +- .config_init = dp83822_config_init, +- .soft_reset = dp83822_phy_reset, +- .get_wol = dp83822_get_wol, +- .set_wol = dp83822_set_wol, +- .ack_interrupt = dp83822_ack_interrupt, +- .config_intr = dp83822_config_intr, +- .suspend = dp83822_suspend, +- .resume = dp83822_resume, +- }, ++ 
DP83822_PHY_DRIVER(DP83822_PHY_ID, "TI DP83822"), ++ DP83822_PHY_DRIVER(DP83825I_PHY_ID, "TI DP83825I"), + }; + module_phy_driver(dp83822_driver); + + static struct mdio_device_id __maybe_unused dp83822_tbl[] = { + { DP83822_PHY_ID, 0xfffffff0 }, ++ { DP83825I_PHY_ID, 0xfffffff0 }, + { }, + }; + MODULE_DEVICE_TABLE(mdio, dp83822_tbl); +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c +index 81970cf777c0..8cafa5a749ca 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c ++++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c +@@ -81,8 +81,9 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data, + + mt76x02_insert_hdr_pad(skb); + +- txwi = skb_push(skb, sizeof(struct mt76x02_txwi)); ++ txwi = (struct mt76x02_txwi *)(skb->data - sizeof(struct mt76x02_txwi)); + mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len); ++ skb_push(skb, sizeof(struct mt76x02_txwi)); + + pid = mt76_tx_status_skb_add(mdev, wcid, skb); + txwi->pktid = pid; +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c +index c9634a774705..2f618536ef2a 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c ++++ b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c +@@ -260,10 +260,15 @@ mt76x2_phy_set_gain_val(struct mt76x02_dev *dev) + gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust; + gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust; + +- if (dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40) ++ val = 0x1836 << 16; ++ if (!mt76x2_has_ext_lna(dev) && ++ dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40) + val = 0x1e42 << 16; +- else +- val = 0x1836 << 16; ++ ++ if (mt76x2_has_ext_lna(dev) && ++ dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ && ++ dev->mt76.chandef.width < NL80211_CHAN_WIDTH_40) ++ val = 0x0f36 << 16; + + val |= 0xf8; + +@@ -280,6 +285,7 @@ void mt76x2_phy_update_channel_gain(struct 
mt76x02_dev *dev) + { + u8 *gain = dev->cal.agc_gain_init; + u8 low_gain_delta, gain_delta; ++ u32 agc_35, agc_37; + bool gain_change; + int low_gain; + u32 val; +@@ -316,6 +322,16 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev) + else + low_gain_delta = 14; + ++ agc_37 = 0x2121262c; ++ if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ) ++ agc_35 = 0x11111516; ++ else if (low_gain == 2) ++ agc_35 = agc_37 = 0x08080808; ++ else if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) ++ agc_35 = 0x10101014; ++ else ++ agc_35 = 0x11111116; ++ + if (low_gain == 2) { + mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990); + mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808); +@@ -324,15 +340,13 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev) + dev->cal.agc_gain_adjust = 0; + } else { + mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991); +- if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) +- mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014); +- else +- mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116); +- mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C); + gain_delta = 0; + dev->cal.agc_gain_adjust = low_gain_delta; + } + ++ mt76_wr(dev, MT_BBP(AGC, 35), agc_35); ++ mt76_wr(dev, MT_BBP(AGC, 37), agc_37); ++ + dev->cal.agc_gain_cur[0] = gain[0] - gain_delta; + dev->cal.agc_gain_cur[1] = gain[1] - gain_delta; + mt76x2_phy_set_gain_val(dev); +diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c +index b9fff3b8ed1b..23da7beadd62 100644 +--- a/drivers/nvme/host/multipath.c ++++ b/drivers/nvme/host/multipath.c +@@ -366,15 +366,12 @@ static inline bool nvme_state_is_live(enum nvme_ana_state state) + static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc, + struct nvme_ns *ns) + { +- enum nvme_ana_state old; +- + mutex_lock(&ns->head->lock); +- old = ns->ana_state; + ns->ana_grpid = le32_to_cpu(desc->grpid); + ns->ana_state = desc->state; + clear_bit(NVME_NS_ANA_PENDING, &ns->flags); + +- if (nvme_state_is_live(ns->ana_state) && !nvme_state_is_live(old)) 
++ if (nvme_state_is_live(ns->ana_state)) + nvme_mpath_set_live(ns); + mutex_unlock(&ns->head->lock); + } +diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c +index 02c63c463222..7bad21a2283f 100644 +--- a/drivers/nvme/target/core.c ++++ b/drivers/nvme/target/core.c +@@ -517,7 +517,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns) + + ret = nvmet_p2pmem_ns_enable(ns); + if (ret) +- goto out_unlock; ++ goto out_dev_disable; + + list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) + nvmet_p2pmem_ns_add_p2p(ctrl, ns); +@@ -558,7 +558,7 @@ out_unlock: + out_dev_put: + list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) + pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid)); +- ++out_dev_disable: + nvmet_ns_dev_disable(ns); + goto out_unlock; + } +diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c +index 517522305e5c..9a0fa3943ca7 100644 +--- a/drivers/nvme/target/io-cmd-file.c ++++ b/drivers/nvme/target/io-cmd-file.c +@@ -75,11 +75,11 @@ err: + return ret; + } + +-static void nvmet_file_init_bvec(struct bio_vec *bv, struct sg_page_iter *iter) ++static void nvmet_file_init_bvec(struct bio_vec *bv, struct scatterlist *sg) + { +- bv->bv_page = sg_page_iter_page(iter); +- bv->bv_offset = iter->sg->offset; +- bv->bv_len = PAGE_SIZE - iter->sg->offset; ++ bv->bv_page = sg_page(sg); ++ bv->bv_offset = sg->offset; ++ bv->bv_len = sg->length; + } + + static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos, +@@ -128,14 +128,14 @@ static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2) + + static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags) + { +- ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE); +- struct sg_page_iter sg_pg_iter; ++ ssize_t nr_bvec = req->sg_cnt; + unsigned long bv_cnt = 0; + bool is_sync = false; + size_t len = 0, total_len = 0; + ssize_t ret = 0; + loff_t pos; +- ++ int i; ++ struct scatterlist *sg; + + if (req->f.mpool_alloc && nr_bvec > 
NVMET_MAX_MPOOL_BVEC) + is_sync = true; +@@ -147,8 +147,8 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags) + } + + memset(&req->f.iocb, 0, sizeof(struct kiocb)); +- for_each_sg_page(req->sg, &sg_pg_iter, req->sg_cnt, 0) { +- nvmet_file_init_bvec(&req->f.bvec[bv_cnt], &sg_pg_iter); ++ for_each_sg(req->sg, sg, req->sg_cnt, i) { ++ nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg); + len += req->f.bvec[bv_cnt].bv_len; + total_len += req->f.bvec[bv_cnt].bv_len; + bv_cnt++; +@@ -225,7 +225,7 @@ static void nvmet_file_submit_buffered_io(struct nvmet_req *req) + + static void nvmet_file_execute_rw(struct nvmet_req *req) + { +- ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE); ++ ssize_t nr_bvec = req->sg_cnt; + + if (!req->sg_cnt || !nr_bvec) { + nvmet_req_complete(req, 0); +diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c +index df34bff4ac31..f73ce96e9603 100644 +--- a/drivers/s390/net/qeth_l3_main.c ++++ b/drivers/s390/net/qeth_l3_main.c +@@ -2316,12 +2316,14 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev) + struct qeth_card *card = dev_get_drvdata(&gdev->dev); + int rc; + ++ hash_init(card->ip_htable); ++ + if (gdev->dev.type == &qeth_generic_devtype) { + rc = qeth_l3_create_device_attributes(&gdev->dev); + if (rc) + return rc; + } +- hash_init(card->ip_htable); ++ + hash_init(card->ip_mc_htable); + card->info.hwtrap = 0; + return 0; +diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c +index db00b5e3abbe..33eddb02ee30 100644 +--- a/drivers/s390/scsi/zfcp_fc.c ++++ b/drivers/s390/scsi/zfcp_fc.c +@@ -239,10 +239,6 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, + list_for_each_entry(port, &adapter->port_list, list) { + if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range)) + zfcp_fc_test_link(port); +- if (!port->d_id) +- zfcp_erp_port_reopen(port, +- ZFCP_STATUS_COMMON_ERP_FAILED, +- "fcrscn1"); + } + 
read_unlock_irqrestore(&adapter->port_list_lock, flags); + } +@@ -250,6 +246,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, + static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req) + { + struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data; ++ struct zfcp_adapter *adapter = fsf_req->adapter; + struct fc_els_rscn *head; + struct fc_els_rscn_page *page; + u16 i; +@@ -263,6 +260,22 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req) + no_entries = be16_to_cpu(head->rscn_plen) / + sizeof(struct fc_els_rscn_page); + ++ if (no_entries > 1) { ++ /* handle failed ports */ ++ unsigned long flags; ++ struct zfcp_port *port; ++ ++ read_lock_irqsave(&adapter->port_list_lock, flags); ++ list_for_each_entry(port, &adapter->port_list, list) { ++ if (port->d_id) ++ continue; ++ zfcp_erp_port_reopen(port, ++ ZFCP_STATUS_COMMON_ERP_FAILED, ++ "fcrscn1"); ++ } ++ read_unlock_irqrestore(&adapter->port_list_lock, flags); ++ } ++ + for (i = 1; i < no_entries; i++) { + /* skip head and start with 1st element */ + page++; +diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h +index 3291d1c16864..8bd09b96ea18 100644 +--- a/drivers/scsi/aacraid/aacraid.h ++++ b/drivers/scsi/aacraid/aacraid.h +@@ -2640,9 +2640,14 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor) + return capacity; + } + ++static inline int aac_pci_offline(struct aac_dev *dev) ++{ ++ return pci_channel_offline(dev->pdev) || dev->handle_pci_error; ++} ++ + static inline int aac_adapter_check_health(struct aac_dev *dev) + { +- if (unlikely(pci_channel_offline(dev->pdev))) ++ if (unlikely(aac_pci_offline(dev))) + return -1; + + return (dev)->a_ops.adapter_check_health(dev); +diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c +index a3adc954f40f..09367b8a3885 100644 +--- a/drivers/scsi/aacraid/commsup.c ++++ b/drivers/scsi/aacraid/commsup.c +@@ -672,7 +672,7 @@ int 
aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, + return -ETIMEDOUT; + } + +- if (unlikely(pci_channel_offline(dev->pdev))) ++ if (unlikely(aac_pci_offline(dev))) + return -EFAULT; + + if ((blink = aac_adapter_check_health(dev)) > 0) { +@@ -772,7 +772,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback, + + spin_unlock_irqrestore(&fibptr->event_lock, flags); + +- if (unlikely(pci_channel_offline(dev->pdev))) ++ if (unlikely(aac_pci_offline(dev))) + return -EFAULT; + + fibptr->flags |= FIB_CONTEXT_FLAG_WAIT; +diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c +index 0a6cb8f0680c..c39f88100f31 100644 +--- a/drivers/scsi/mpt3sas/mpt3sas_base.c ++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c +@@ -3281,12 +3281,18 @@ mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid) + + if (smid < ioc->hi_priority_smid) { + struct scsiio_tracker *st; ++ void *request; + + st = _get_st_from_smid(ioc, smid); + if (!st) { + _base_recovery_check(ioc); + return; + } ++ ++ /* Clear MPI request frame */ ++ request = mpt3sas_base_get_msg_frame(ioc, smid); ++ memset(request, 0, ioc->request_sz); ++ + mpt3sas_base_clear_st(ioc, st); + _base_recovery_check(ioc); + return; +diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c +index 6be39dc27103..6173c211a5e5 100644 +--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c ++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c +@@ -1462,11 +1462,23 @@ mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid) + { + struct scsi_cmnd *scmd = NULL; + struct scsiio_tracker *st; ++ Mpi25SCSIIORequest_t *mpi_request; + + if (smid > 0 && + smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) { + u32 unique_tag = smid - 1; + ++ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); ++ ++ /* ++ * If SCSI IO request is outstanding at driver level then ++ * DevHandle filed must be non-zero. 
If DevHandle is zero ++ * then it means that this smid is free at driver level, ++ * so return NULL. ++ */ ++ if (!mpi_request->DevHandle) ++ return scmd; ++ + scmd = scsi_host_find_tag(ioc->shost, unique_tag); + if (scmd) { + st = scsi_cmd_priv(scmd); +diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c +index a77bfb224248..80289c885c07 100644 +--- a/drivers/scsi/qla4xxx/ql4_os.c ++++ b/drivers/scsi/qla4xxx/ql4_os.c +@@ -3203,6 +3203,8 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session, + if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) + return -EINVAL; + ep = iscsi_lookup_endpoint(transport_fd); ++ if (!ep) ++ return -EINVAL; + conn = cls_conn->dd_data; + qla_conn = conn->dd_data; + qla_conn->qla_ep = ep->dd_data; +diff --git a/drivers/staging/axis-fifo/Kconfig b/drivers/staging/axis-fifo/Kconfig +index 687537203d9c..d9725888af6f 100644 +--- a/drivers/staging/axis-fifo/Kconfig ++++ b/drivers/staging/axis-fifo/Kconfig +@@ -3,6 +3,7 @@ + # + config XIL_AXIS_FIFO + tristate "Xilinx AXI-Stream FIFO IP core driver" ++ depends on OF + default n + help + This adds support for the Xilinx AXI-Stream +diff --git a/drivers/staging/mt7621-pci/Kconfig b/drivers/staging/mt7621-pci/Kconfig +index d33533872a16..c8fa17cfa807 100644 +--- a/drivers/staging/mt7621-pci/Kconfig ++++ b/drivers/staging/mt7621-pci/Kconfig +@@ -1,6 +1,7 @@ + config PCI_MT7621 + tristate "MediaTek MT7621 PCI Controller" + depends on RALINK ++ depends on PCI + select PCI_DRIVERS_GENERIC + help + This selects a driver for the MediaTek MT7621 PCI Controller. 
+diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c +index 3b1ccd138c3f..6fb6ea29a8b6 100644 +--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c ++++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c +@@ -174,7 +174,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter) + + pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf; + +- rtw_alloc_hwxmits(padapter); ++ res = rtw_alloc_hwxmits(padapter); ++ if (res == _FAIL) ++ goto exit; + rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry); + + for (i = 0; i < 4; i++) +@@ -1503,7 +1505,7 @@ exit: + return res; + } + +-void rtw_alloc_hwxmits(struct adapter *padapter) ++s32 rtw_alloc_hwxmits(struct adapter *padapter) + { + struct hw_xmit *hwxmits; + struct xmit_priv *pxmitpriv = &padapter->xmitpriv; +@@ -1512,6 +1514,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter) + + pxmitpriv->hwxmits = kcalloc(pxmitpriv->hwxmit_entry, + sizeof(struct hw_xmit), GFP_KERNEL); ++ if (!pxmitpriv->hwxmits) ++ return _FAIL; + + hwxmits = pxmitpriv->hwxmits; + +@@ -1519,6 +1523,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter) + hwxmits[1] .sta_queue = &pxmitpriv->vi_pending; + hwxmits[2] .sta_queue = &pxmitpriv->be_pending; + hwxmits[3] .sta_queue = &pxmitpriv->bk_pending; ++ return _SUCCESS; + } + + void rtw_free_hwxmits(struct adapter *padapter) +diff --git a/drivers/staging/rtl8188eu/include/rtw_xmit.h b/drivers/staging/rtl8188eu/include/rtw_xmit.h +index 788f59c74ea1..ba7e15fbde72 100644 +--- a/drivers/staging/rtl8188eu/include/rtw_xmit.h ++++ b/drivers/staging/rtl8188eu/include/rtw_xmit.h +@@ -336,7 +336,7 @@ s32 rtw_txframes_sta_ac_pending(struct adapter *padapter, + void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry); + s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter); + void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv); +-void rtw_alloc_hwxmits(struct adapter *padapter); ++s32 rtw_alloc_hwxmits(struct adapter 
*padapter); + void rtw_free_hwxmits(struct adapter *padapter); + s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt); + +diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c +index 1920d02f7c9f..8c36acedf507 100644 +--- a/drivers/staging/rtl8712/rtl8712_cmd.c ++++ b/drivers/staging/rtl8712/rtl8712_cmd.c +@@ -147,17 +147,9 @@ static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf) + + static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf) + { +- u32 val; +- void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd); + struct cmd_obj *pcmd = (struct cmd_obj *)pbuf; + +- if (pcmd->rsp && pcmd->rspsz > 0) +- memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz); +- pcmd_callback = cmd_callback[pcmd->cmdcode].callback; +- if (!pcmd_callback) +- r8712_free_cmd_obj(pcmd); +- else +- pcmd_callback(padapter, pcmd); ++ r8712_free_cmd_obj(pcmd); + return H2C_SUCCESS; + } + +diff --git a/drivers/staging/rtl8712/rtl8712_cmd.h b/drivers/staging/rtl8712/rtl8712_cmd.h +index 92fb77666d44..1ef86b8c592f 100644 +--- a/drivers/staging/rtl8712/rtl8712_cmd.h ++++ b/drivers/staging/rtl8712/rtl8712_cmd.h +@@ -140,7 +140,7 @@ enum rtl8712_h2c_cmd { + static struct _cmd_callback cmd_callback[] = { + {GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/ + {GEN_CMD_CODE(_Write_MACREG), NULL}, +- {GEN_CMD_CODE(_Read_BBREG), &r8712_getbbrfreg_cmdrsp_callback}, ++ {GEN_CMD_CODE(_Read_BBREG), NULL}, + {GEN_CMD_CODE(_Write_BBREG), NULL}, + {GEN_CMD_CODE(_Read_RFREG), &r8712_getbbrfreg_cmdrsp_callback}, + {GEN_CMD_CODE(_Write_RFREG), NULL}, /*5*/ +diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c +index 625e67f39889..a36b2213d8ee 100644 +--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c ++++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c +@@ -260,7 +260,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter) + } + } + +- rtw_alloc_hwxmits(padapter); ++ res = 
rtw_alloc_hwxmits(padapter); ++ if (res == _FAIL) ++ goto exit; + rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry); + + for (i = 0; i < 4; i++) { +@@ -2144,7 +2146,7 @@ exit: + return res; + } + +-void rtw_alloc_hwxmits(struct adapter *padapter) ++s32 rtw_alloc_hwxmits(struct adapter *padapter) + { + struct hw_xmit *hwxmits; + struct xmit_priv *pxmitpriv = &padapter->xmitpriv; +@@ -2155,10 +2157,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter) + + pxmitpriv->hwxmits = rtw_zmalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry); + +- if (pxmitpriv->hwxmits == NULL) { +- DBG_871X("alloc hwxmits fail!...\n"); +- return; +- } ++ if (!pxmitpriv->hwxmits) ++ return _FAIL; + + hwxmits = pxmitpriv->hwxmits; + +@@ -2204,7 +2204,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter) + + } + +- ++ return _SUCCESS; + } + + void rtw_free_hwxmits(struct adapter *padapter) +diff --git a/drivers/staging/rtl8723bs/include/rtw_xmit.h b/drivers/staging/rtl8723bs/include/rtw_xmit.h +index 1b38b9182b31..37f42b2f22f1 100644 +--- a/drivers/staging/rtl8723bs/include/rtw_xmit.h ++++ b/drivers/staging/rtl8723bs/include/rtw_xmit.h +@@ -487,7 +487,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter); + void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv); + + +-void rtw_alloc_hwxmits(struct adapter *padapter); ++s32 rtw_alloc_hwxmits(struct adapter *padapter); + void rtw_free_hwxmits(struct adapter *padapter); + + +diff --git a/drivers/staging/rtlwifi/phydm/rtl_phydm.c b/drivers/staging/rtlwifi/phydm/rtl_phydm.c +index 9930ed954abb..4cc77b2016e1 100644 +--- a/drivers/staging/rtlwifi/phydm/rtl_phydm.c ++++ b/drivers/staging/rtlwifi/phydm/rtl_phydm.c +@@ -180,6 +180,8 @@ static int rtl_phydm_init_priv(struct rtl_priv *rtlpriv, + + rtlpriv->phydm.internal = + kzalloc(sizeof(struct phy_dm_struct), GFP_KERNEL); ++ if (!rtlpriv->phydm.internal) ++ return 0; + + _rtl_phydm_init_com_info(rtlpriv, ic, params); + +diff --git 
a/drivers/staging/rtlwifi/rtl8822be/fw.c b/drivers/staging/rtlwifi/rtl8822be/fw.c +index a40396614814..c1ed52df05f0 100644 +--- a/drivers/staging/rtlwifi/rtl8822be/fw.c ++++ b/drivers/staging/rtlwifi/rtl8822be/fw.c +@@ -741,6 +741,8 @@ void rtl8822be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) + u1_rsvd_page_loc, 3); + + skb = dev_alloc_skb(totalpacketlen); ++ if (!skb) ++ return; + memcpy((u8 *)skb_put(skb, totalpacketlen), &reserved_page_packet, + totalpacketlen); + +diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c +index 804daf83be35..064d0db4c51e 100644 +--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c ++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c +@@ -3513,6 +3513,7 @@ static int vchiq_probe(struct platform_device *pdev) + struct device_node *fw_node; + const struct of_device_id *of_id; + struct vchiq_drvdata *drvdata; ++ struct device *vchiq_dev; + int err; + + of_id = of_match_node(vchiq_of_match, pdev->dev.of_node); +@@ -3547,9 +3548,12 @@ static int vchiq_probe(struct platform_device *pdev) + goto failed_platform_init; + } + +- if (IS_ERR(device_create(vchiq_class, &pdev->dev, vchiq_devid, +- NULL, "vchiq"))) ++ vchiq_dev = device_create(vchiq_class, &pdev->dev, vchiq_devid, NULL, ++ "vchiq"); ++ if (IS_ERR(vchiq_dev)) { ++ err = PTR_ERR(vchiq_dev); + goto failed_device_create; ++ } + + vchiq_debugfs_init(); + +diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c +index db5df3d54818..3bdd56a1021b 100644 +--- a/drivers/tty/serial/ar933x_uart.c ++++ b/drivers/tty/serial/ar933x_uart.c +@@ -49,11 +49,6 @@ struct ar933x_uart_port { + struct clk *clk; + }; + +-static inline bool ar933x_uart_console_enabled(void) +-{ +- return IS_ENABLED(CONFIG_SERIAL_AR933X_CONSOLE); +-} +- + static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up, + int offset) + { +@@ -508,6 +503,7 @@ 
static const struct uart_ops ar933x_uart_ops = { + .verify_port = ar933x_uart_verify_port, + }; + ++#ifdef CONFIG_SERIAL_AR933X_CONSOLE + static struct ar933x_uart_port * + ar933x_console_ports[CONFIG_SERIAL_AR933X_NR_UARTS]; + +@@ -604,14 +600,7 @@ static struct console ar933x_uart_console = { + .index = -1, + .data = &ar933x_uart_driver, + }; +- +-static void ar933x_uart_add_console_port(struct ar933x_uart_port *up) +-{ +- if (!ar933x_uart_console_enabled()) +- return; +- +- ar933x_console_ports[up->port.line] = up; +-} ++#endif /* CONFIG_SERIAL_AR933X_CONSOLE */ + + static struct uart_driver ar933x_uart_driver = { + .owner = THIS_MODULE, +@@ -700,7 +689,9 @@ static int ar933x_uart_probe(struct platform_device *pdev) + baud = ar933x_uart_get_baud(port->uartclk, 0, AR933X_UART_MAX_STEP); + up->max_baud = min_t(unsigned int, baud, AR933X_UART_MAX_BAUD); + +- ar933x_uart_add_console_port(up); ++#ifdef CONFIG_SERIAL_AR933X_CONSOLE ++ ar933x_console_ports[up->port.line] = up; ++#endif + + ret = uart_add_one_port(&ar933x_uart_driver, &up->port); + if (ret) +@@ -749,8 +740,9 @@ static int __init ar933x_uart_init(void) + { + int ret; + +- if (ar933x_uart_console_enabled()) +- ar933x_uart_driver.cons = &ar933x_uart_console; ++#ifdef CONFIG_SERIAL_AR933X_CONSOLE ++ ar933x_uart_driver.cons = &ar933x_uart_console; ++#endif + + ret = uart_register_driver(&ar933x_uart_driver); + if (ret) +diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c +index 268098681856..114e94f476c6 100644 +--- a/drivers/tty/serial/sc16is7xx.c ++++ b/drivers/tty/serial/sc16is7xx.c +@@ -1509,7 +1509,7 @@ static int __init sc16is7xx_init(void) + ret = i2c_add_driver(&sc16is7xx_i2c_uart_driver); + if (ret < 0) { + pr_err("failed to init sc16is7xx i2c --> %d\n", ret); +- return ret; ++ goto err_i2c; + } + #endif + +@@ -1517,10 +1517,18 @@ static int __init sc16is7xx_init(void) + ret = spi_register_driver(&sc16is7xx_spi_uart_driver); + if (ret < 0) { + pr_err("failed to init 
sc16is7xx spi --> %d\n", ret); +- return ret; ++ goto err_spi; + } + #endif + return ret; ++ ++err_spi: ++#ifdef CONFIG_SERIAL_SC16IS7XX_I2C ++ i2c_del_driver(&sc16is7xx_i2c_uart_driver); ++#endif ++err_i2c: ++ uart_unregister_driver(&sc16is7xx_uart); ++ return ret; + } + module_init(sc16is7xx_init); + +diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c +index fdc6e4e403e8..8cced3609e24 100644 +--- a/drivers/usb/dwc3/dwc3-pci.c ++++ b/drivers/usb/dwc3/dwc3-pci.c +@@ -29,6 +29,7 @@ + #define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa + #define PCI_DEVICE_ID_INTEL_APL 0x5aaa + #define PCI_DEVICE_ID_INTEL_KBP 0xa2b0 ++#define PCI_DEVICE_ID_INTEL_CMLH 0x02ee + #define PCI_DEVICE_ID_INTEL_GLK 0x31aa + #define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee + #define PCI_DEVICE_ID_INTEL_CNPH 0xa36e +@@ -305,6 +306,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = { + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD), + (kernel_ulong_t) &dwc3_pci_mrfld_properties, }, + ++ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLH), ++ (kernel_ulong_t) &dwc3_pci_intel_properties, }, ++ + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SPTLP), + (kernel_ulong_t) &dwc3_pci_intel_properties, }, + +diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c +index b77f3126580e..c2011cd7df8c 100644 +--- a/drivers/usb/gadget/udc/net2272.c ++++ b/drivers/usb/gadget/udc/net2272.c +@@ -945,6 +945,7 @@ net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req) + break; + } + if (&req->req != _req) { ++ ep->stopped = stopped; + spin_unlock_irqrestore(&ep->dev->lock, flags); + return -EINVAL; + } +diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c +index e7dae5379e04..d93cf4171953 100644 +--- a/drivers/usb/gadget/udc/net2280.c ++++ b/drivers/usb/gadget/udc/net2280.c +@@ -866,9 +866,6 @@ static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma) + (void) readl(&ep->dev->pci->pcimstctl); + + writel(BIT(DMA_START), &dma->dmastat); +- +- 
if (!ep->is_in) +- stop_out_naking(ep); + } + + static void start_dma(struct net2280_ep *ep, struct net2280_request *req) +@@ -907,6 +904,7 @@ static void start_dma(struct net2280_ep *ep, struct net2280_request *req) + writel(BIT(DMA_START), &dma->dmastat); + return; + } ++ stop_out_naking(ep); + } + + tmp = dmactl_default; +@@ -1275,9 +1273,9 @@ static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req) + break; + } + if (&req->req != _req) { ++ ep->stopped = stopped; + spin_unlock_irqrestore(&ep->dev->lock, flags); +- dev_err(&ep->dev->pdev->dev, "%s: Request mismatch\n", +- __func__); ++ ep_dbg(ep->dev, "%s: Request mismatch\n", __func__); + return -EINVAL; + } + +diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c +index 5b8a3d9530c4..5cac83aaeac3 100644 +--- a/drivers/usb/host/u132-hcd.c ++++ b/drivers/usb/host/u132-hcd.c +@@ -3202,6 +3202,9 @@ static int __init u132_hcd_init(void) + printk(KERN_INFO "driver %s\n", hcd_name); + workqueue = create_singlethread_workqueue("u132"); + retval = platform_driver_register(&u132_platform_driver); ++ if (retval) ++ destroy_workqueue(workqueue); ++ + return retval; + } + +diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c +index a6efb9a72939..5f7734c729b1 100644 +--- a/drivers/usb/misc/usb251xb.c ++++ b/drivers/usb/misc/usb251xb.c +@@ -601,7 +601,7 @@ static int usb251xb_probe(struct usb251xb *hub) + dev); + int err; + +- if (np) { ++ if (np && of_id) { + err = usb251xb_get_ofdata(hub, + (struct usb251xb_data *)of_id->data); + if (err) { +diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c +index ca08c83168f5..0b37867b5c20 100644 +--- a/fs/afs/fsclient.c ++++ b/fs/afs/fsclient.c +@@ -1515,8 +1515,8 @@ static int afs_fs_setattr_size64(struct afs_fs_cursor *fc, struct iattr *attr) + + xdr_encode_AFS_StoreStatus(&bp, attr); + +- *bp++ = 0; /* position of start of write */ +- *bp++ = 0; ++ *bp++ = htonl(attr->ia_size >> 32); /* position of start of write */ ++ *bp++ = 
htonl((u32) attr->ia_size); + *bp++ = 0; /* size of write */ + *bp++ = 0; + *bp++ = htonl(attr->ia_size >> 32); /* new file length */ +@@ -1564,7 +1564,7 @@ static int afs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr) + + xdr_encode_AFS_StoreStatus(&bp, attr); + +- *bp++ = 0; /* position of start of write */ ++ *bp++ = htonl(attr->ia_size); /* position of start of write */ + *bp++ = 0; /* size of write */ + *bp++ = htonl(attr->ia_size); /* new file length */ + +diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c +index 5aa57929e8c2..6e97a42d24d1 100644 +--- a/fs/afs/yfsclient.c ++++ b/fs/afs/yfsclient.c +@@ -1514,7 +1514,7 @@ static int yfs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr) + bp = xdr_encode_u32(bp, 0); /* RPC flags */ + bp = xdr_encode_YFSFid(bp, &vnode->fid); + bp = xdr_encode_YFS_StoreStatus(bp, attr); +- bp = xdr_encode_u64(bp, 0); /* position of start of write */ ++ bp = xdr_encode_u64(bp, attr->ia_size); /* position of start of write */ + bp = xdr_encode_u64(bp, 0); /* size of write */ + bp = xdr_encode_u64(bp, attr->ia_size); /* new file length */ + yfs_check_req(call, bp); +diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c +index 4ec2b660d014..7f3ece91a4d0 100644 +--- a/fs/btrfs/transaction.c ++++ b/fs/btrfs/transaction.c +@@ -1886,8 +1886,10 @@ static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans) + } + } + +-static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info) ++static inline int btrfs_start_delalloc_flush(struct btrfs_trans_handle *trans) + { ++ struct btrfs_fs_info *fs_info = trans->fs_info; ++ + /* + * We use writeback_inodes_sb here because if we used + * btrfs_start_delalloc_roots we would deadlock with fs freeze. +@@ -1897,15 +1899,50 @@ static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info) + * from already being in a transaction and our join_transaction doesn't + * have to re-take the fs freeze lock. 
+ */ +- if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) ++ if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) { + writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC); ++ } else { ++ struct btrfs_pending_snapshot *pending; ++ struct list_head *head = &trans->transaction->pending_snapshots; ++ ++ /* ++ * Flush dellaloc for any root that is going to be snapshotted. ++ * This is done to avoid a corrupted version of files, in the ++ * snapshots, that had both buffered and direct IO writes (even ++ * if they were done sequentially) due to an unordered update of ++ * the inode's size on disk. ++ */ ++ list_for_each_entry(pending, head, list) { ++ int ret; ++ ++ ret = btrfs_start_delalloc_snapshot(pending->root); ++ if (ret) ++ return ret; ++ } ++ } + return 0; + } + +-static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info) ++static inline void btrfs_wait_delalloc_flush(struct btrfs_trans_handle *trans) + { +- if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) ++ struct btrfs_fs_info *fs_info = trans->fs_info; ++ ++ if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) { + btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1); ++ } else { ++ struct btrfs_pending_snapshot *pending; ++ struct list_head *head = &trans->transaction->pending_snapshots; ++ ++ /* ++ * Wait for any dellaloc that we started previously for the roots ++ * that are going to be snapshotted. This is to avoid a corrupted ++ * version of files in the snapshots that had both buffered and ++ * direct IO writes (even if they were done sequentially). 
++ */ ++ list_for_each_entry(pending, head, list) ++ btrfs_wait_ordered_extents(pending->root, ++ U64_MAX, 0, U64_MAX); ++ } + } + + int btrfs_commit_transaction(struct btrfs_trans_handle *trans) +@@ -2024,7 +2061,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) + + extwriter_counter_dec(cur_trans, trans->type); + +- ret = btrfs_start_delalloc_flush(fs_info); ++ ret = btrfs_start_delalloc_flush(trans); + if (ret) + goto cleanup_transaction; + +@@ -2040,7 +2077,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) + if (ret) + goto cleanup_transaction; + +- btrfs_wait_delalloc_flush(fs_info); ++ btrfs_wait_delalloc_flush(trans); + + btrfs_scrub_pause(fs_info); + /* +diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c +index 9d1f34d46627..f7f9e305aaf8 100644 +--- a/fs/ceph/inode.c ++++ b/fs/ceph/inode.c +@@ -524,6 +524,7 @@ static void ceph_i_callback(struct rcu_head *head) + struct inode *inode = container_of(head, struct inode, i_rcu); + struct ceph_inode_info *ci = ceph_inode(inode); + ++ kfree(ci->i_symlink); + kmem_cache_free(ceph_inode_cachep, ci); + } + +@@ -561,7 +562,6 @@ void ceph_destroy_inode(struct inode *inode) + ceph_put_snap_realm(mdsc, realm); + } + +- kfree(ci->i_symlink); + while ((n = rb_first(&ci->i_fragtree)) != NULL) { + frag = rb_entry(n, struct ceph_inode_frag, node); + rb_erase(n, &ci->i_fragtree); +diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c +index 809c0f2f9942..64f4de983468 100644 +--- a/fs/fuse/dev.c ++++ b/fs/fuse/dev.c +@@ -2034,10 +2034,8 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, + rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len; + + ret = -EINVAL; +- if (rem < len) { +- pipe_unlock(pipe); +- goto out; +- } ++ if (rem < len) ++ goto out_free; + + rem = len; + while (rem) { +@@ -2055,7 +2053,9 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, + pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1); + pipe->nrbufs--; + } else { +- 
pipe_buf_get(pipe, ibuf); ++ if (!pipe_buf_get(pipe, ibuf)) ++ goto out_free; ++ + *obuf = *ibuf; + obuf->flags &= ~PIPE_BUF_FLAG_GIFT; + obuf->len = rem; +@@ -2078,11 +2078,11 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, + ret = fuse_dev_do_write(fud, &cs, len); + + pipe_lock(pipe); ++out_free: + for (idx = 0; idx < nbuf; idx++) + pipe_buf_release(pipe, &bufs[idx]); + pipe_unlock(pipe); + +-out: + kvfree(bufs); + return ret; + } +diff --git a/fs/nfs/client.c b/fs/nfs/client.c +index fb1cf1a4bda2..90d71fda65ce 100644 +--- a/fs/nfs/client.c ++++ b/fs/nfs/client.c +@@ -453,7 +453,7 @@ void nfs_init_timeout_values(struct rpc_timeout *to, int proto, + case XPRT_TRANSPORT_RDMA: + if (retrans == NFS_UNSPEC_RETRANS) + to->to_retries = NFS_DEF_TCP_RETRANS; +- if (timeo == NFS_UNSPEC_TIMEO || to->to_retries == 0) ++ if (timeo == NFS_UNSPEC_TIMEO || to->to_initval == 0) + to->to_initval = NFS_DEF_TCP_TIMEO * HZ / 10; + if (to->to_initval > NFS_MAX_TCP_TIMEOUT) + to->to_initval = NFS_MAX_TCP_TIMEOUT; +diff --git a/fs/pipe.c b/fs/pipe.c +index c51750ed4011..2a297bce381f 100644 +--- a/fs/pipe.c ++++ b/fs/pipe.c +@@ -189,9 +189,9 @@ EXPORT_SYMBOL(generic_pipe_buf_steal); + * in the tee() system call, when we duplicate the buffers in one + * pipe into another. + */ +-void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf) ++bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf) + { +- get_page(buf->page); ++ return try_get_page(buf->page); + } + EXPORT_SYMBOL(generic_pipe_buf_get); + +diff --git a/fs/splice.c b/fs/splice.c +index 7da7d5437472..c38c7e7a49c9 100644 +--- a/fs/splice.c ++++ b/fs/splice.c +@@ -1588,7 +1588,11 @@ retry: + * Get a reference to this pipe buffer, + * so we can copy the contents over. 
+ */ +- pipe_buf_get(ipipe, ibuf); ++ if (!pipe_buf_get(ipipe, ibuf)) { ++ if (ret == 0) ++ ret = -EFAULT; ++ break; ++ } + *obuf = *ibuf; + + /* +@@ -1662,7 +1666,11 @@ static int link_pipe(struct pipe_inode_info *ipipe, + * Get a reference to this pipe buffer, + * so we can copy the contents over. + */ +- pipe_buf_get(ipipe, ibuf); ++ if (!pipe_buf_get(ipipe, ibuf)) { ++ if (ret == 0) ++ ret = -EFAULT; ++ break; ++ } + + obuf = opipe->bufs + nbuf; + *obuf = *ibuf; +diff --git a/include/linux/mm.h b/include/linux/mm.h +index 80bb6408fe73..7000ddd807e0 100644 +--- a/include/linux/mm.h ++++ b/include/linux/mm.h +@@ -965,6 +965,10 @@ static inline bool is_pci_p2pdma_page(const struct page *page) + } + #endif /* CONFIG_DEV_PAGEMAP_OPS */ + ++/* 127: arbitrary random number, small enough to assemble well */ ++#define page_ref_zero_or_close_to_overflow(page) \ ++ ((unsigned int) page_ref_count(page) + 127u <= 127u) ++ + static inline void get_page(struct page *page) + { + page = compound_head(page); +@@ -972,8 +976,17 @@ static inline void get_page(struct page *page) + * Getting a normal page or the head of a compound page + * requires to already have an elevated page->_refcount. + */ +- VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page); ++ VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page); ++ page_ref_inc(page); ++} ++ ++static inline __must_check bool try_get_page(struct page *page) ++{ ++ page = compound_head(page); ++ if (WARN_ON_ONCE(page_ref_count(page) <= 0)) ++ return false; + page_ref_inc(page); ++ return true; + } + + static inline void put_page(struct page *page) +diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h +index 66ee63cd5968..7897a3cc05b9 100644 +--- a/include/linux/pipe_fs_i.h ++++ b/include/linux/pipe_fs_i.h +@@ -108,18 +108,20 @@ struct pipe_buf_operations { + /* + * Get a reference to the pipe buffer. 
+ */ +- void (*get)(struct pipe_inode_info *, struct pipe_buffer *); ++ bool (*get)(struct pipe_inode_info *, struct pipe_buffer *); + }; + + /** + * pipe_buf_get - get a reference to a pipe_buffer + * @pipe: the pipe that the buffer belongs to + * @buf: the buffer to get a reference to ++ * ++ * Return: %true if the reference was successfully obtained. + */ +-static inline void pipe_buf_get(struct pipe_inode_info *pipe, ++static inline __must_check bool pipe_buf_get(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) + { +- buf->ops->get(pipe, buf); ++ return buf->ops->get(pipe, buf); + } + + /** +@@ -178,7 +180,7 @@ struct pipe_inode_info *alloc_pipe_info(void); + void free_pipe_info(struct pipe_inode_info *); + + /* Generic pipe buffer ops functions */ +-void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *); ++bool generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *); + int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *); + int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *); + int generic_pipe_buf_nosteal(struct pipe_inode_info *, struct pipe_buffer *); +diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h +index 13789d10a50e..76b8399b17f6 100644 +--- a/include/linux/sched/signal.h ++++ b/include/linux/sched/signal.h +@@ -417,10 +417,20 @@ static inline void set_restore_sigmask(void) + set_thread_flag(TIF_RESTORE_SIGMASK); + WARN_ON(!test_thread_flag(TIF_SIGPENDING)); + } ++ ++static inline void clear_tsk_restore_sigmask(struct task_struct *tsk) ++{ ++ clear_tsk_thread_flag(tsk, TIF_RESTORE_SIGMASK); ++} ++ + static inline void clear_restore_sigmask(void) + { + clear_thread_flag(TIF_RESTORE_SIGMASK); + } ++static inline bool test_tsk_restore_sigmask(struct task_struct *tsk) ++{ ++ return test_tsk_thread_flag(tsk, TIF_RESTORE_SIGMASK); ++} + static inline bool test_restore_sigmask(void) + { + return test_thread_flag(TIF_RESTORE_SIGMASK); +@@ -438,6 
+448,10 @@ static inline void set_restore_sigmask(void) + current->restore_sigmask = true; + WARN_ON(!test_thread_flag(TIF_SIGPENDING)); + } ++static inline void clear_tsk_restore_sigmask(struct task_struct *tsk) ++{ ++ tsk->restore_sigmask = false; ++} + static inline void clear_restore_sigmask(void) + { + current->restore_sigmask = false; +@@ -446,6 +460,10 @@ static inline bool test_restore_sigmask(void) + { + return current->restore_sigmask; + } ++static inline bool test_tsk_restore_sigmask(struct task_struct *tsk) ++{ ++ return tsk->restore_sigmask; ++} + static inline bool test_and_clear_restore_sigmask(void) + { + if (!current->restore_sigmask) +diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h +index ef8dd0db70ce..56935bf027a7 100644 +--- a/include/net/tc_act/tc_gact.h ++++ b/include/net/tc_act/tc_gact.h +@@ -56,7 +56,7 @@ static inline bool is_tcf_gact_goto_chain(const struct tc_action *a) + + static inline u32 tcf_gact_goto_chain_index(const struct tc_action *a) + { +- return a->goto_chain->index; ++ return READ_ONCE(a->tcfa_action) & TC_ACT_EXT_VAL_MASK; + } + + #endif /* __NET_TC_GACT_H */ +diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h +index 13acb9803a6d..05d39e579953 100644 +--- a/include/net/xdp_sock.h ++++ b/include/net/xdp_sock.h +@@ -36,7 +36,6 @@ struct xdp_umem { + u32 headroom; + u32 chunk_size_nohr; + struct user_struct *user; +- struct pid *pid; + unsigned long address; + refcount_t users; + struct work_struct work; +diff --git a/kernel/ptrace.c b/kernel/ptrace.c +index 771e93f9c43f..6f357f4fc859 100644 +--- a/kernel/ptrace.c ++++ b/kernel/ptrace.c +@@ -29,6 +29,7 @@ + #include <linux/hw_breakpoint.h> + #include <linux/cn_proc.h> + #include <linux/compat.h> ++#include <linux/sched/signal.h> + + /* + * Access another process' address space via ptrace. 
+@@ -924,18 +925,26 @@ int ptrace_request(struct task_struct *child, long request, + ret = ptrace_setsiginfo(child, &siginfo); + break; + +- case PTRACE_GETSIGMASK: ++ case PTRACE_GETSIGMASK: { ++ sigset_t *mask; ++ + if (addr != sizeof(sigset_t)) { + ret = -EINVAL; + break; + } + +- if (copy_to_user(datavp, &child->blocked, sizeof(sigset_t))) ++ if (test_tsk_restore_sigmask(child)) ++ mask = &child->saved_sigmask; ++ else ++ mask = &child->blocked; ++ ++ if (copy_to_user(datavp, mask, sizeof(sigset_t))) + ret = -EFAULT; + else + ret = 0; + + break; ++ } + + case PTRACE_SETSIGMASK: { + sigset_t new_set; +@@ -961,6 +970,8 @@ int ptrace_request(struct task_struct *child, long request, + child->blocked = new_set; + spin_unlock_irq(&child->sighand->siglock); + ++ clear_tsk_restore_sigmask(child); ++ + ret = 0; + break; + } +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index d07fc2836786..3842773b8aee 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -6843,12 +6843,16 @@ static void buffer_pipe_buf_release(struct pipe_inode_info *pipe, + buf->private = 0; + } + +-static void buffer_pipe_buf_get(struct pipe_inode_info *pipe, ++static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) + { + struct buffer_ref *ref = (struct buffer_ref *)buf->private; + ++ if (refcount_read(&ref->refcount) > INT_MAX/2) ++ return false; ++ + refcount_inc(&ref->refcount); ++ return true; + } + + /* Pipe buffer operations for a buffer. */ +diff --git a/lib/sbitmap.c b/lib/sbitmap.c +index 5b382c1244ed..155fe38756ec 100644 +--- a/lib/sbitmap.c ++++ b/lib/sbitmap.c +@@ -591,6 +591,17 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up); + void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr, + unsigned int cpu) + { ++ /* ++ * Once the clear bit is set, the bit may be allocated out. 
++ * ++ * Orders READ/WRITE on the asssociated instance(such as request ++ * of blk_mq) by this bit for avoiding race with re-allocation, ++ * and its pair is the memory barrier implied in __sbitmap_get_word. ++ * ++ * One invariant is that the clear bit has to be zero when the bit ++ * is in use. ++ */ ++ smp_mb__before_atomic(); + sbitmap_deferred_clear_bit(&sbq->sb, nr); + + /* +diff --git a/mm/gup.c b/mm/gup.c +index 75029649baca..81e0bdefa2cc 100644 +--- a/mm/gup.c ++++ b/mm/gup.c +@@ -157,8 +157,12 @@ retry: + goto retry; + } + +- if (flags & FOLL_GET) +- get_page(page); ++ if (flags & FOLL_GET) { ++ if (unlikely(!try_get_page(page))) { ++ page = ERR_PTR(-ENOMEM); ++ goto out; ++ } ++ } + if (flags & FOLL_TOUCH) { + if ((flags & FOLL_WRITE) && + !pte_dirty(pte) && !PageDirty(page)) +@@ -295,7 +299,10 @@ retry_locked: + if (pmd_trans_unstable(pmd)) + ret = -EBUSY; + } else { +- get_page(page); ++ if (unlikely(!try_get_page(page))) { ++ spin_unlock(ptl); ++ return ERR_PTR(-ENOMEM); ++ } + spin_unlock(ptl); + lock_page(page); + ret = split_huge_page(page); +@@ -497,7 +504,10 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address, + if (is_device_public_page(*page)) + goto unmap; + } +- get_page(*page); ++ if (unlikely(!try_get_page(*page))) { ++ ret = -ENOMEM; ++ goto unmap; ++ } + out: + ret = 0; + unmap: +@@ -1393,6 +1403,20 @@ static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages) + } + } + ++/* ++ * Return the compund head page with ref appropriately incremented, ++ * or NULL if that failed. 
++ */ ++static inline struct page *try_get_compound_head(struct page *page, int refs) ++{ ++ struct page *head = compound_head(page); ++ if (WARN_ON_ONCE(page_ref_count(head) < 0)) ++ return NULL; ++ if (unlikely(!page_cache_add_speculative(head, refs))) ++ return NULL; ++ return head; ++} ++ + #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL + static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, + int write, struct page **pages, int *nr) +@@ -1427,9 +1451,9 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, + + VM_BUG_ON(!pfn_valid(pte_pfn(pte))); + page = pte_page(pte); +- head = compound_head(page); + +- if (!page_cache_get_speculative(head)) ++ head = try_get_compound_head(page, 1); ++ if (!head) + goto pte_unmap; + + if (unlikely(pte_val(pte) != pte_val(*ptep))) { +@@ -1568,8 +1592,8 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, + refs++; + } while (addr += PAGE_SIZE, addr != end); + +- head = compound_head(pmd_page(orig)); +- if (!page_cache_add_speculative(head, refs)) { ++ head = try_get_compound_head(pmd_page(orig), refs); ++ if (!head) { + *nr -= refs; + return 0; + } +@@ -1606,8 +1630,8 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, + refs++; + } while (addr += PAGE_SIZE, addr != end); + +- head = compound_head(pud_page(orig)); +- if (!page_cache_add_speculative(head, refs)) { ++ head = try_get_compound_head(pud_page(orig), refs); ++ if (!head) { + *nr -= refs; + return 0; + } +@@ -1643,8 +1667,8 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr, + refs++; + } while (addr += PAGE_SIZE, addr != end); + +- head = compound_head(pgd_page(orig)); +- if (!page_cache_add_speculative(head, refs)) { ++ head = try_get_compound_head(pgd_page(orig), refs); ++ if (!head) { + *nr -= refs; + return 0; + } +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index 8dfdffc34a99..c220315dc533 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -4298,6 +4298,19 @@ long 
follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, + + pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT; + page = pte_page(huge_ptep_get(pte)); ++ ++ /* ++ * Instead of doing 'try_get_page()' below in the same_page ++ * loop, just check the count once here. ++ */ ++ if (unlikely(page_count(page) <= 0)) { ++ if (pages) { ++ spin_unlock(ptl); ++ remainder = 0; ++ err = -ENOMEM; ++ break; ++ } ++ } + same_page: + if (pages) { + pages[i] = mem_map_offset(page, pfn_offset); +diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h +index ea51b2d898ec..c980ce43e3ba 100644 +--- a/mm/kasan/kasan.h ++++ b/mm/kasan/kasan.h +@@ -164,7 +164,10 @@ static inline u8 random_tag(void) + #endif + + #ifndef arch_kasan_set_tag +-#define arch_kasan_set_tag(addr, tag) ((void *)(addr)) ++static inline const void *arch_kasan_set_tag(const void *addr, u8 tag) ++{ ++ return addr; ++} + #endif + #ifndef arch_kasan_reset_tag + #define arch_kasan_reset_tag(addr) ((void *)(addr)) +diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c +index 40d058378b52..fc605758323b 100644 +--- a/net/bridge/br_netfilter_hooks.c ++++ b/net/bridge/br_netfilter_hooks.c +@@ -502,6 +502,7 @@ static unsigned int br_nf_pre_routing(void *priv, + nf_bridge->ipv4_daddr = ip_hdr(skb)->daddr; + + skb->protocol = htons(ETH_P_IP); ++ skb->transport_header = skb->network_header + ip_hdr(skb)->ihl * 4; + + NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->net, state->sk, skb, + skb->dev, NULL, +diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c +index 564710f88f93..e88d6641647b 100644 +--- a/net/bridge/br_netfilter_ipv6.c ++++ b/net/bridge/br_netfilter_ipv6.c +@@ -235,6 +235,8 @@ unsigned int br_nf_pre_routing_ipv6(void *priv, + nf_bridge->ipv6_daddr = ipv6_hdr(skb)->daddr; + + skb->protocol = htons(ETH_P_IPV6); ++ skb->transport_header = skb->network_header + sizeof(struct ipv6hdr); ++ + NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, state->net, state->sk, skb, 
+ skb->dev, NULL, + br_nf_pre_routing_finish_ipv6); +diff --git a/net/ipv6/netfilter/ip6t_srh.c b/net/ipv6/netfilter/ip6t_srh.c +index 1059894a6f4c..4cb83fb69844 100644 +--- a/net/ipv6/netfilter/ip6t_srh.c ++++ b/net/ipv6/netfilter/ip6t_srh.c +@@ -210,6 +210,8 @@ static bool srh1_mt6(const struct sk_buff *skb, struct xt_action_param *par) + psidoff = srhoff + sizeof(struct ipv6_sr_hdr) + + ((srh->segments_left + 1) * sizeof(struct in6_addr)); + psid = skb_header_pointer(skb, psidoff, sizeof(_psid), &_psid); ++ if (!psid) ++ return false; + if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_PSID, + ipv6_masked_addr_cmp(psid, &srhinfo->psid_msk, + &srhinfo->psid_addr))) +@@ -223,6 +225,8 @@ static bool srh1_mt6(const struct sk_buff *skb, struct xt_action_param *par) + nsidoff = srhoff + sizeof(struct ipv6_sr_hdr) + + ((srh->segments_left - 1) * sizeof(struct in6_addr)); + nsid = skb_header_pointer(skb, nsidoff, sizeof(_nsid), &_nsid); ++ if (!nsid) ++ return false; + if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_NSID, + ipv6_masked_addr_cmp(nsid, &srhinfo->nsid_msk, + &srhinfo->nsid_addr))) +@@ -233,6 +237,8 @@ static bool srh1_mt6(const struct sk_buff *skb, struct xt_action_param *par) + if (srhinfo->mt_flags & IP6T_SRH_LSID) { + lsidoff = srhoff + sizeof(struct ipv6_sr_hdr); + lsid = skb_header_pointer(skb, lsidoff, sizeof(_lsid), &_lsid); ++ if (!lsid) ++ return false; + if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_LSID, + ipv6_masked_addr_cmp(lsid, &srhinfo->lsid_msk, + &srhinfo->lsid_addr))) +diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig +index beb3a69ce1d4..0f0e5806bf77 100644 +--- a/net/netfilter/Kconfig ++++ b/net/netfilter/Kconfig +@@ -995,6 +995,7 @@ config NETFILTER_XT_TARGET_TEE + depends on NETFILTER_ADVANCED + depends on IPV6 || IPV6=n + depends on !NF_CONNTRACK || NF_CONNTRACK ++ depends on IP6_NF_IPTABLES || !IP6_NF_IPTABLES + select NF_DUP_IPV4 + select NF_DUP_IPV6 if IP6_NF_IPTABLES + ---help--- +diff --git a/net/netfilter/nft_set_rbtree.c 
b/net/netfilter/nft_set_rbtree.c +index fa61208371f8..321a0036fdf5 100644 +--- a/net/netfilter/nft_set_rbtree.c ++++ b/net/netfilter/nft_set_rbtree.c +@@ -308,10 +308,6 @@ static void *nft_rbtree_deactivate(const struct net *net, + else if (d > 0) + parent = parent->rb_right; + else { +- if (!nft_set_elem_active(&rbe->ext, genmask)) { +- parent = parent->rb_left; +- continue; +- } + if (nft_rbtree_interval_end(rbe) && + !nft_rbtree_interval_end(this)) { + parent = parent->rb_left; +@@ -320,6 +316,9 @@ static void *nft_rbtree_deactivate(const struct net *net, + nft_rbtree_interval_end(this)) { + parent = parent->rb_right; + continue; ++ } else if (!nft_set_elem_active(&rbe->ext, genmask)) { ++ parent = parent->rb_left; ++ continue; + } + nft_rbtree_flush(net, set, rbe); + return rbe; +diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c +index 7754aa3e434f..f88c2bd1335a 100644 +--- a/net/sunrpc/xprtsock.c ++++ b/net/sunrpc/xprtsock.c +@@ -486,8 +486,8 @@ xs_read_stream_request(struct sock_xprt *transport, struct msghdr *msg, + int flags, struct rpc_rqst *req) + { + struct xdr_buf *buf = &req->rq_private_buf; +- size_t want, read; +- ssize_t ret; ++ size_t want, uninitialized_var(read); ++ ssize_t uninitialized_var(ret); + + xs_read_header(transport, buf); + +diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c +index 37e1fe180769..9c767c68ed3a 100644 +--- a/net/xdp/xdp_umem.c ++++ b/net/xdp/xdp_umem.c +@@ -189,9 +189,6 @@ static void xdp_umem_unaccount_pages(struct xdp_umem *umem) + + static void xdp_umem_release(struct xdp_umem *umem) + { +- struct task_struct *task; +- struct mm_struct *mm; +- + xdp_umem_clear_dev(umem); + + if (umem->fq) { +@@ -208,21 +205,10 @@ static void xdp_umem_release(struct xdp_umem *umem) + + xdp_umem_unpin_pages(umem); + +- task = get_pid_task(umem->pid, PIDTYPE_PID); +- put_pid(umem->pid); +- if (!task) +- goto out; +- mm = get_task_mm(task); +- put_task_struct(task); +- if (!mm) +- goto out; +- +- mmput(mm); + kfree(umem->pages); 
+ umem->pages = NULL; + + xdp_umem_unaccount_pages(umem); +-out: + kfree(umem); + } + +@@ -351,7 +337,6 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) + if (size_chk < 0) + return -EINVAL; + +- umem->pid = get_task_pid(current, PIDTYPE_PID); + umem->address = (unsigned long)addr; + umem->chunk_mask = ~((u64)chunk_size - 1); + umem->size = size; +@@ -367,7 +352,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) + + err = xdp_umem_account_pages(umem); + if (err) +- goto out; ++ return err; + + err = xdp_umem_pin_pages(umem); + if (err) +@@ -386,8 +371,6 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) + + out_account: + xdp_umem_unaccount_pages(umem); +-out: +- put_pid(umem->pid); + return err; + } + +diff --git a/scripts/kconfig/lxdialog/inputbox.c b/scripts/kconfig/lxdialog/inputbox.c +index 611945611bf8..1dcfb288ee63 100644 +--- a/scripts/kconfig/lxdialog/inputbox.c ++++ b/scripts/kconfig/lxdialog/inputbox.c +@@ -113,7 +113,8 @@ do_resize: + case KEY_DOWN: + break; + case KEY_BACKSPACE: +- case 127: ++ case 8: /* ^H */ ++ case 127: /* ^? 
*/ + if (pos) { + wattrset(dialog, dlg.inputbox.atr); + if (input_x == 0) { +diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c +index a4670f4e825a..ac92c0ded6c5 100644 +--- a/scripts/kconfig/nconf.c ++++ b/scripts/kconfig/nconf.c +@@ -1048,7 +1048,7 @@ static int do_match(int key, struct match_state *state, int *ans) + state->match_direction = FIND_NEXT_MATCH_UP; + *ans = get_mext_match(state->pattern, + state->match_direction); +- } else if (key == KEY_BACKSPACE || key == 127) { ++ } else if (key == KEY_BACKSPACE || key == 8 || key == 127) { + state->pattern[strlen(state->pattern)-1] = '\0'; + adj_match_dir(&state->match_direction); + } else +diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c +index 7be620a1fcdb..77f525a8617c 100644 +--- a/scripts/kconfig/nconf.gui.c ++++ b/scripts/kconfig/nconf.gui.c +@@ -439,7 +439,8 @@ int dialog_inputbox(WINDOW *main_window, + case KEY_F(F_EXIT): + case KEY_F(F_BACK): + break; +- case 127: ++ case 8: /* ^H */ ++ case 127: /* ^? 
*/ + case KEY_BACKSPACE: + if (cursor_position > 0) { + memmove(&result[cursor_position-1], +diff --git a/scripts/selinux/genheaders/genheaders.c b/scripts/selinux/genheaders/genheaders.c +index 1ceedea847dd..544ca126a8a8 100644 +--- a/scripts/selinux/genheaders/genheaders.c ++++ b/scripts/selinux/genheaders/genheaders.c +@@ -9,7 +9,6 @@ + #include <string.h> + #include <errno.h> + #include <ctype.h> +-#include <sys/socket.h> + + struct security_class_mapping { + const char *name; +diff --git a/scripts/selinux/mdp/mdp.c b/scripts/selinux/mdp/mdp.c +index 073fe7537f6c..6d51b74bc679 100644 +--- a/scripts/selinux/mdp/mdp.c ++++ b/scripts/selinux/mdp/mdp.c +@@ -32,7 +32,6 @@ + #include <stdlib.h> + #include <unistd.h> + #include <string.h> +-#include <sys/socket.h> + + static void usage(char *name) + { +diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h +index bd5fe0d3204a..201f7e588a29 100644 +--- a/security/selinux/include/classmap.h ++++ b/security/selinux/include/classmap.h +@@ -1,5 +1,6 @@ + /* SPDX-License-Identifier: GPL-2.0 */ + #include <linux/capability.h> ++#include <linux/socket.h> + + #define COMMON_FILE_SOCK_PERMS "ioctl", "read", "write", "create", \ + "getattr", "setattr", "lock", "relabelfrom", "relabelto", "append", "map" +diff --git a/tools/build/feature/test-libopencsd.c b/tools/build/feature/test-libopencsd.c +index d68eb4fb40cc..2b0e02c38870 100644 +--- a/tools/build/feature/test-libopencsd.c ++++ b/tools/build/feature/test-libopencsd.c +@@ -4,9 +4,9 @@ + /* + * Check OpenCSD library version is sufficient to provide required features + */ +-#define OCSD_MIN_VER ((0 << 16) | (10 << 8) | (0)) ++#define OCSD_MIN_VER ((0 << 16) | (11 << 8) | (0)) + #if !defined(OCSD_VER_NUM) || (OCSD_VER_NUM < OCSD_MIN_VER) +-#error "OpenCSD >= 0.10.0 is required" ++#error "OpenCSD >= 0.11.0 is required" + #endif + + int main(void) +diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c 
b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c +index 8c155575c6c5..2a8bf6b45a30 100644 +--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c ++++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c +@@ -374,6 +374,7 @@ cs_etm_decoder__buffer_range(struct cs_etm_decoder *decoder, + break; + case OCSD_INSTR_ISB: + case OCSD_INSTR_DSB_DMB: ++ case OCSD_INSTR_WFI_WFE: + case OCSD_INSTR_OTHER: + default: + packet->last_instr_taken_branch = false; +diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c +index 143f7057d581..596db1daee35 100644 +--- a/tools/perf/util/machine.c ++++ b/tools/perf/util/machine.c +@@ -1358,6 +1358,20 @@ static void machine__set_kernel_mmap(struct machine *machine, + machine->vmlinux_map->end = ~0ULL; + } + ++static void machine__update_kernel_mmap(struct machine *machine, ++ u64 start, u64 end) ++{ ++ struct map *map = machine__kernel_map(machine); ++ ++ map__get(map); ++ map_groups__remove(&machine->kmaps, map); ++ ++ machine__set_kernel_mmap(machine, start, end); ++ ++ map_groups__insert(&machine->kmaps, map); ++ map__put(map); ++} ++ + int machine__create_kernel_maps(struct machine *machine) + { + struct dso *kernel = machine__get_kernel(machine); +@@ -1390,17 +1404,11 @@ int machine__create_kernel_maps(struct machine *machine) + goto out_put; + } + +- /* we have a real start address now, so re-order the kmaps */ +- map = machine__kernel_map(machine); +- +- map__get(map); +- map_groups__remove(&machine->kmaps, map); +- +- /* assume it's the last in the kmaps */ +- machine__set_kernel_mmap(machine, addr, ~0ULL); +- +- map_groups__insert(&machine->kmaps, map); +- map__put(map); ++ /* ++ * we have a real start address now, so re-order the kmaps ++ * assume it's the last in the kmaps ++ */ ++ machine__update_kernel_mmap(machine, addr, ~0ULL); + } + + if (machine__create_extra_kernel_maps(machine, kernel)) +@@ -1536,7 +1544,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine, + if (strstr(kernel->long_name, 
"vmlinux")) + dso__set_short_name(kernel, "[kernel.vmlinux]", false); + +- machine__set_kernel_mmap(machine, event->mmap.start, ++ machine__update_kernel_mmap(machine, event->mmap.start, + event->mmap.start + event->mmap.len); + + /* +diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile +index f9a0e9938480..cb4a992d6dd3 100644 +--- a/tools/testing/selftests/kvm/Makefile ++++ b/tools/testing/selftests/kvm/Makefile +@@ -28,8 +28,8 @@ LIBKVM += $(LIBKVM_$(UNAME_M)) + INSTALL_HDR_PATH = $(top_srcdir)/usr + LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/ + LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include +-CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I.. +-LDFLAGS += -pthread ++CFLAGS += -O2 -g -std=gnu99 -fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I.. ++LDFLAGS += -pthread -no-pie + + # After inclusion, $(OUTPUT) is defined and + # $(TEST_GEN_PROGS) starts with $(OUTPUT)/ +diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h +index a84785b02557..07b71ad9734a 100644 +--- a/tools/testing/selftests/kvm/include/kvm_util.h ++++ b/tools/testing/selftests/kvm/include/kvm_util.h +@@ -102,6 +102,7 @@ vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva); + struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid); + void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid); + int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid); ++void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid); + void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid, + struct kvm_mp_state *mp_state); + void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs); +diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c +index b52cfdefecbf..efa0aad8b3c6 100644 +--- a/tools/testing/selftests/kvm/lib/kvm_util.c 
++++ b/tools/testing/selftests/kvm/lib/kvm_util.c +@@ -1121,6 +1121,22 @@ int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid) + return rc; + } + ++void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid) ++{ ++ struct vcpu *vcpu = vcpu_find(vm, vcpuid); ++ int ret; ++ ++ TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); ++ ++ vcpu->state->immediate_exit = 1; ++ ret = ioctl(vcpu->fd, KVM_RUN, NULL); ++ vcpu->state->immediate_exit = 0; ++ ++ TEST_ASSERT(ret == -1 && errno == EINTR, ++ "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i", ++ ret, errno); ++} ++ + /* + * VM VCPU Set MP State + * +diff --git a/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c +index d503a51fad30..7c2c4d4055a8 100644 +--- a/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c ++++ b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c +@@ -87,22 +87,25 @@ int main(int argc, char *argv[]) + while (1) { + rc = _vcpu_run(vm, VCPU_ID); + +- if (run->exit_reason == KVM_EXIT_IO) { +- switch (get_ucall(vm, VCPU_ID, &uc)) { +- case UCALL_SYNC: +- /* emulate hypervisor clearing CR4.OSXSAVE */ +- vcpu_sregs_get(vm, VCPU_ID, &sregs); +- sregs.cr4 &= ~X86_CR4_OSXSAVE; +- vcpu_sregs_set(vm, VCPU_ID, &sregs); +- break; +- case UCALL_ABORT: +- TEST_ASSERT(false, "Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit."); +- break; +- case UCALL_DONE: +- goto done; +- default: +- TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd); +- } ++ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, ++ "Unexpected exit reason: %u (%s),\n", ++ run->exit_reason, ++ exit_reason_str(run->exit_reason)); ++ ++ switch (get_ucall(vm, VCPU_ID, &uc)) { ++ case UCALL_SYNC: ++ /* emulate hypervisor clearing CR4.OSXSAVE */ ++ vcpu_sregs_get(vm, VCPU_ID, &sregs); ++ sregs.cr4 &= ~X86_CR4_OSXSAVE; ++ vcpu_sregs_set(vm, VCPU_ID, &sregs); ++ break; ++ case UCALL_ABORT: ++ TEST_ASSERT(false, "Guest CR4 bit (OSXSAVE) unsynchronized with CPUID 
bit."); ++ break; ++ case UCALL_DONE: ++ goto done; ++ default: ++ TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd); + } + } + +diff --git a/tools/testing/selftests/kvm/x86_64/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c +index 4b3f556265f1..30f75856cf39 100644 +--- a/tools/testing/selftests/kvm/x86_64/state_test.c ++++ b/tools/testing/selftests/kvm/x86_64/state_test.c +@@ -134,6 +134,11 @@ int main(int argc, char *argv[]) + + struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1); + ++ if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) { ++ fprintf(stderr, "immediate_exit not available, skipping test\n"); ++ exit(KSFT_SKIP); ++ } ++ + /* Create VM */ + vm = vm_create_default(VCPU_ID, 0, guest_code); + vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); +@@ -156,8 +161,6 @@ int main(int argc, char *argv[]) + stage, run->exit_reason, + exit_reason_str(run->exit_reason)); + +- memset(®s1, 0, sizeof(regs1)); +- vcpu_regs_get(vm, VCPU_ID, ®s1); + switch (get_ucall(vm, VCPU_ID, &uc)) { + case UCALL_ABORT: + TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0], +@@ -176,6 +179,17 @@ int main(int argc, char *argv[]) + uc.args[1] == stage, "Unexpected register values vmexit #%lx, got %lx", + stage, (ulong)uc.args[1]); + ++ /* ++ * When KVM exits to userspace with KVM_EXIT_IO, KVM guarantees ++ * guest state is consistent only after userspace re-enters the ++ * kernel with KVM_RUN. Complete IO prior to migrating state ++ * to a new VM. 
++ */ ++ vcpu_run_complete_io(vm, VCPU_ID); ++ ++ memset(®s1, 0, sizeof(regs1)); ++ vcpu_regs_get(vm, VCPU_ID, ®s1); ++ + state = vcpu_save_state(vm, VCPU_ID); + kvm_vm_release(vm); + +diff --git a/virt/kvm/arm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c +index 9652c453480f..3c3f7cda95c7 100644 +--- a/virt/kvm/arm/hyp/vgic-v3-sr.c ++++ b/virt/kvm/arm/hyp/vgic-v3-sr.c +@@ -222,7 +222,7 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) + } + } + +- if (used_lrs) { ++ if (used_lrs || cpu_if->its_vpe.its_vm) { + int i; + u32 elrsr; + +@@ -247,7 +247,7 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) + u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs; + int i; + +- if (used_lrs) { ++ if (used_lrs || cpu_if->its_vpe.its_vm) { + write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2); + + for (i = 0; i < used_lrs; i++) +diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c +index 5cc22cdaa5ba..31e22b615d99 100644 +--- a/virt/kvm/arm/mmu.c ++++ b/virt/kvm/arm/mmu.c +@@ -1060,25 +1060,43 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache + { + pmd_t *pmd, old_pmd; + ++retry: + pmd = stage2_get_pmd(kvm, cache, addr); + VM_BUG_ON(!pmd); + + old_pmd = *pmd; ++ /* ++ * Multiple vcpus faulting on the same PMD entry, can ++ * lead to them sequentially updating the PMD with the ++ * same value. Following the break-before-make ++ * (pmd_clear() followed by tlb_flush()) process can ++ * hinder forward progress due to refaults generated ++ * on missing translations. ++ * ++ * Skip updating the page table if the entry is ++ * unchanged. ++ */ ++ if (pmd_val(old_pmd) == pmd_val(*new_pmd)) ++ return 0; ++ + if (pmd_present(old_pmd)) { + /* +- * Multiple vcpus faulting on the same PMD entry, can +- * lead to them sequentially updating the PMD with the +- * same value. Following the break-before-make +- * (pmd_clear() followed by tlb_flush()) process can +- * hinder forward progress due to refaults generated +- * on missing translations. 
++ * If we already have PTE level mapping for this block, ++ * we must unmap it to avoid inconsistent TLB state and ++ * leaking the table page. We could end up in this situation ++ * if the memory slot was marked for dirty logging and was ++ * reverted, leaving PTE level mappings for the pages accessed ++ * during the period. So, unmap the PTE level mapping for this ++ * block and retry, as we could have released the upper level ++ * table in the process. + * +- * Skip updating the page table if the entry is +- * unchanged. ++ * Normal THP split/merge follows mmu_notifier callbacks and do ++ * get handled accordingly. + */ +- if (pmd_val(old_pmd) == pmd_val(*new_pmd)) +- return 0; +- ++ if (!pmd_thp_or_huge(old_pmd)) { ++ unmap_stage2_range(kvm, addr & S2_PMD_MASK, S2_PMD_SIZE); ++ goto retry; ++ } + /* + * Mapping in huge pages should only happen through a + * fault. If a page is merged into a transparent huge +@@ -1090,8 +1108,7 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache + * should become splitting first, unmapped, merged, + * and mapped back in on-demand. + */ +- VM_BUG_ON(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd)); +- ++ WARN_ON_ONCE(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd)); + pmd_clear(pmd); + kvm_tlb_flush_vmid_ipa(kvm, addr); + } else { +@@ -1107,6 +1124,7 @@ static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cac + { + pud_t *pudp, old_pud; + ++retry: + pudp = stage2_get_pud(kvm, cache, addr); + VM_BUG_ON(!pudp); + +@@ -1114,14 +1132,23 @@ static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cac + + /* + * A large number of vcpus faulting on the same stage 2 entry, +- * can lead to a refault due to the +- * stage2_pud_clear()/tlb_flush(). Skip updating the page +- * tables if there is no change. ++ * can lead to a refault due to the stage2_pud_clear()/tlb_flush(). ++ * Skip updating the page tables if there is no change. 
+ */ + if (pud_val(old_pud) == pud_val(*new_pudp)) + return 0; + + if (stage2_pud_present(kvm, old_pud)) { ++ /* ++ * If we already have table level mapping for this block, unmap ++ * the range for this block and retry. ++ */ ++ if (!stage2_pud_huge(kvm, old_pud)) { ++ unmap_stage2_range(kvm, addr & S2_PUD_MASK, S2_PUD_SIZE); ++ goto retry; ++ } ++ ++ WARN_ON_ONCE(kvm_pud_pfn(old_pud) != kvm_pud_pfn(*new_pudp)); + stage2_pud_clear(kvm, pudp); + kvm_tlb_flush_vmid_ipa(kvm, addr); + } else { +diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c +index ab3f47745d9c..fcb2fceaa4a5 100644 +--- a/virt/kvm/arm/vgic/vgic-its.c ++++ b/virt/kvm/arm/vgic/vgic-its.c +@@ -754,8 +754,9 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id, + u64 indirect_ptr, type = GITS_BASER_TYPE(baser); + phys_addr_t base = GITS_BASER_ADDR_48_to_52(baser); + int esz = GITS_BASER_ENTRY_SIZE(baser); +- int index; ++ int index, idx; + gfn_t gfn; ++ bool ret; + + switch (type) { + case GITS_BASER_TYPE_DEVICE: +@@ -782,7 +783,8 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id, + + if (eaddr) + *eaddr = addr; +- return kvm_is_visible_gfn(its->dev->kvm, gfn); ++ ++ goto out; + } + + /* calculate and check the index into the 1st level */ +@@ -812,7 +814,12 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id, + + if (eaddr) + *eaddr = indirect_ptr; +- return kvm_is_visible_gfn(its->dev->kvm, gfn); ++ ++out: ++ idx = srcu_read_lock(&its->dev->kvm->srcu); ++ ret = kvm_is_visible_gfn(its->dev->kvm, gfn); ++ srcu_read_unlock(&its->dev->kvm->srcu, idx); ++ return ret; + } + + static int vgic_its_alloc_collection(struct vgic_its *its, +@@ -1919,7 +1926,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev, + ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) | + ite->collection->collection_id; + val = cpu_to_le64(val); +- return kvm_write_guest(kvm, gpa, &val, ite_esz); ++ return 
kvm_write_guest_lock(kvm, gpa, &val, ite_esz); + } + + /** +@@ -2066,7 +2073,7 @@ static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev, + (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) | + (dev->num_eventid_bits - 1)); + val = cpu_to_le64(val); +- return kvm_write_guest(kvm, ptr, &val, dte_esz); ++ return kvm_write_guest_lock(kvm, ptr, &val, dte_esz); + } + + /** +@@ -2246,7 +2253,7 @@ static int vgic_its_save_cte(struct vgic_its *its, + ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) | + collection->collection_id); + val = cpu_to_le64(val); +- return kvm_write_guest(its->dev->kvm, gpa, &val, esz); ++ return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz); + } + + static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz) +@@ -2317,7 +2324,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its) + */ + val = 0; + BUG_ON(cte_esz > sizeof(val)); +- ret = kvm_write_guest(its->dev->kvm, gpa, &val, cte_esz); ++ ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz); + return ret; + } + +diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c +index 4ee0aeb9a905..89260964be73 100644 +--- a/virt/kvm/arm/vgic/vgic-v3.c ++++ b/virt/kvm/arm/vgic/vgic-v3.c +@@ -358,7 +358,7 @@ retry: + if (status) { + /* clear consumed data */ + val &= ~(1 << bit_nr); +- ret = kvm_write_guest(kvm, ptr, &val, 1); ++ ret = kvm_write_guest_lock(kvm, ptr, &val, 1); + if (ret) + return ret; + } +@@ -409,7 +409,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm) + else + val &= ~(1 << bit_nr); + +- ret = kvm_write_guest(kvm, ptr, &val, 1); ++ ret = kvm_write_guest_lock(kvm, ptr, &val, 1); + if (ret) + return ret; + } +diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c +index abd9c7352677..3af69f2a3866 100644 +--- a/virt/kvm/arm/vgic/vgic.c ++++ b/virt/kvm/arm/vgic/vgic.c +@@ -867,15 +867,21 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) + * either observe the new interrupt before or after 
doing this check, + * and introducing additional synchronization mechanism doesn't change + * this. ++ * ++ * Note that we still need to go through the whole thing if anything ++ * can be directly injected (GICv4). + */ +- if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) ++ if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) && ++ !vgic_supports_direct_msis(vcpu->kvm)) + return; + + DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); + +- raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock); +- vgic_flush_lr_state(vcpu); +- raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock); ++ if (!list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) { ++ raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock); ++ vgic_flush_lr_state(vcpu); ++ raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock); ++ } + + if (can_access_vgic_from_kernel()) + vgic_restore_state(vcpu); |