author     Mike Pagano <mpagano@gentoo.org>  2021-04-07 09:28:35 -0400
committer  Mike Pagano <mpagano@gentoo.org>  2021-04-07 09:28:35 -0400
commit     b1426f1417a3d8dc6818745ed5b3ad90ea972660 (patch)
tree       4c085b25ee50f074727c685f13e4efa930b5b0f9
parent     linux patch 5.11.11 (diff)
download   linux-patches-b1426f1417a3d8dc6818745ed5b3ad90ea972660.tar.gz
           linux-patches-b1426f1417a3d8dc6818745ed5b3ad90ea972660.tar.bz2
           linux-patches-b1426f1417a3d8dc6818745ed5b3ad90ea972660.zip
Linux patch 5.11.12 (5.11-15)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README              |    4
-rw-r--r--  1011_linux-5.11.12.patch | 8224
2 files changed, 8228 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 49fee78f..fe996e43 100644
--- a/0000_README
+++ b/0000_README
@@ -87,6 +87,10 @@ Patch: 1010_linux-5.11.11.patch
From: http://www.kernel.org
Desc: Linux 5.11.11
+Patch: 1011_linux-5.11.12.patch
+From: http://www.kernel.org
+Desc: Linux 5.11.12
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1011_linux-5.11.12.patch b/1011_linux-5.11.12.patch
new file mode 100644
index 00000000..f7186765
--- /dev/null
+++ b/1011_linux-5.11.12.patch
@@ -0,0 +1,8224 @@
+diff --git a/Makefile b/Makefile
+index 7578e0d9622fb..1e31504aab61b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 11
+-SUBLEVEL = 11
++SUBLEVEL = 12
+ EXTRAVERSION =
+ NAME = 💕 Valentine's Day Edition 💕
+
+diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
+index 6f0648777d347..ee01f421e1e4c 100644
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -1445,14 +1445,30 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
+
+ static bool inside_linear_region(u64 start, u64 size)
+ {
++ u64 start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual));
++ u64 end_linear_pa = __pa(PAGE_END - 1);
++
++ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
++ /*
++ * Check for a wrap, it is possible because of randomized linear
++ * mapping the start physical address is actually bigger than
++ * the end physical address. In this case set start to zero
++ * because [0, end_linear_pa] range must still be able to cover
++ * all addressable physical addresses.
++ */
++ if (start_linear_pa > end_linear_pa)
++ start_linear_pa = 0;
++ }
++
++ WARN_ON(start_linear_pa > end_linear_pa);
++
+ /*
+ * Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)]
+ * accommodating both its ends but excluding PAGE_END. Max physical
+ * range which can be mapped inside this linear mapping range, must
+ * also be derived from its end points.
+ */
+- return start >= __pa(_PAGE_OFFSET(vabits_actual)) &&
+- (start + size - 1) <= __pa(PAGE_END - 1);
++ return start >= start_linear_pa && (start + size - 1) <= end_linear_pa;
+ }
+
+ int arch_add_memory(int nid, u64 start, u64 size,
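
The arm64 hunk above handles KASLR placing the linear map so that the physical
address of its start exceeds that of its end. A minimal wrap-aware containment
check in the same shape, as a standalone C sketch (names and bounds are
illustrative, not the kernel's):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Modelled on inside_linear_region() above: if randomization pushed
     * the lower bound past the upper bound, fall back to 0 so [0, hi]
     * still covers every addressable physical address. */
    static bool inside_range(uint64_t lo, uint64_t hi,
                             uint64_t start, uint64_t size)
    {
        if (lo > hi)    /* wrapped linear map: clamp the lower bound */
            lo = 0;
        return start >= lo && (start + size - 1) <= hi;
    }

    int main(void)
    {
        /* wrapped bounds (lo > hi): 0x1000..0x1fff is still accepted */
        printf("%d\n", inside_range(0xf000, 0x8000, 0x1000, 0x1000)); /* 1 */
        /* normal bounds: 0x1000 lies below lo, so it is rejected */
        printf("%d\n", inside_range(0x2000, 0x8000, 0x1000, 0x1000)); /* 0 */
        return 0;
    }
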
+diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
+index 764170fdb0f74..3805519a64697 100644
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -887,7 +887,8 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot,
+
+ want_v = hpte_encode_avpn(vpn, psize, ssize);
+
+- flags = (newpp & 7) | H_AVPN;
++ flags = (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO)) | H_AVPN;
++ flags |= (newpp & HPTE_R_KEY_HI) >> 48;
+ if (mmu_has_feature(MMU_FTR_KERNEL_RO))
+ /* Move pp0 into bit 8 (IBM 55) */
+ flags |= (newpp & HPTE_R_PP0) >> 55;
+diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
+index ea4d6a660e0dc..e83e0891272d3 100644
+--- a/arch/powerpc/platforms/pseries/mobility.c
++++ b/arch/powerpc/platforms/pseries/mobility.c
+@@ -452,12 +452,28 @@ static int do_suspend(void)
+ return ret;
+ }
+
++/**
++ * struct pseries_suspend_info - State shared between CPUs for join/suspend.
++ * @counter: Threads are to increment this upon resuming from suspend
++ * or if an error is received from H_JOIN. The thread which performs
++ * the first increment (i.e. sets it to 1) is responsible for
++ * waking the other threads.
++ * @done: False if join/suspend is in progress. True if the operation is
++ * complete (successful or not).
++ */
++struct pseries_suspend_info {
++ atomic_t counter;
++ bool done;
++};
++
+ static int do_join(void *arg)
+ {
+- atomic_t *counter = arg;
++ struct pseries_suspend_info *info = arg;
++ atomic_t *counter = &info->counter;
+ long hvrc;
+ int ret;
+
++retry:
+ /* Must ensure MSR.EE off for H_JOIN. */
+ hard_irq_disable();
+ hvrc = plpar_hcall_norets(H_JOIN);
+@@ -473,8 +489,20 @@ static int do_join(void *arg)
+ case H_SUCCESS:
+ /*
+ * The suspend is complete and this cpu has received a
+- * prod.
++ * prod, or we've received a stray prod from unrelated
++ * code (e.g. paravirt spinlocks) and we need to join
++ * again.
++ *
++ * This barrier orders the return from H_JOIN above vs
++ * the load of info->done. It pairs with the barrier
++ * in the wakeup/prod path below.
+ */
++ smp_mb();
++ if (READ_ONCE(info->done) == false) {
++ pr_info_ratelimited("premature return from H_JOIN on CPU %i, retrying",
++ smp_processor_id());
++ goto retry;
++ }
+ ret = 0;
+ break;
+ case H_BAD_MODE:
+@@ -488,6 +516,13 @@ static int do_join(void *arg)
+
+ if (atomic_inc_return(counter) == 1) {
+ pr_info("CPU %u waking all threads\n", smp_processor_id());
++ WRITE_ONCE(info->done, true);
++ /*
++ * This barrier orders the store to info->done vs subsequent
++ * H_PRODs to wake the other CPUs. It pairs with the barrier
++ * in the H_SUCCESS case above.
++ */
++ smp_mb();
+ prod_others();
+ }
+ /*
+@@ -535,11 +570,16 @@ static int pseries_suspend(u64 handle)
+ int ret;
+
+ while (true) {
+- atomic_t counter = ATOMIC_INIT(0);
++ struct pseries_suspend_info info;
+ unsigned long vasi_state;
+ int vasi_err;
+
+- ret = stop_machine(do_join, &counter, cpu_online_mask);
++ info = (struct pseries_suspend_info) {
++ .counter = ATOMIC_INIT(0),
++ .done = false,
++ };
++
++ ret = stop_machine(do_join, &info, cpu_online_mask);
+ if (ret == 0)
+ break;
+ /*
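
The mobility fix treats a return from H_JOIN with info->done still false as a
stray prod and joins again; the smp_mb() pairs order the flag's store and load
against the wakeup. A user-space C11 analogue of the publish-then-wake pattern
(pthreads and a busy-wait stand in for stop_machine() and H_JOIN; a sketch, not
the kernel code):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct suspend_info {
        atomic_int counter;   /* first incrementer wakes the others */
        atomic_bool done;     /* false while join/suspend is running */
    };

    static void *joiner(void *arg)
    {
        struct suspend_info *info = arg;

        /* A premature wakeup shows up as "running again but done is
         * still false"; seq_cst atomics play the role of the paired
         * smp_mb()s in do_join(). */
        while (!atomic_load(&info->done))
            ;   /* stray prod: retry the join (busy-wait here) */

        if (atomic_fetch_add(&info->counter, 1) == 0)
            puts("first thread awake: would prod the others");
        return NULL;
    }

    int main(void)
    {
        struct suspend_info info = { .counter = 0, .done = false };
        pthread_t t;

        pthread_create(&t, NULL, joiner, &info);
        atomic_store(&info.done, true);   /* publish completion, then "prod" */
        pthread_join(t, NULL);
        printf("counter=%d\n", atomic_load(&info.counter));   /* 1 */
        return 0;
    }
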
+diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
+index 824b2c9da75bd..f944062c9d990 100644
+--- a/arch/riscv/include/asm/uaccess.h
++++ b/arch/riscv/include/asm/uaccess.h
+@@ -306,7 +306,9 @@ do { \
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+- * to the result of dereferencing @ptr.
++ * to the result of dereferencing @ptr. The value of @x is copied to avoid
++ * re-ordering where @x is evaluated inside the block that enables user-space
++ * access (thus bypassing user space protection if @x is a function).
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+@@ -316,12 +318,13 @@ do { \
+ #define __put_user(x, ptr) \
+ ({ \
+ __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
++ __typeof__(*__gu_ptr) __val = (x); \
+ long __pu_err = 0; \
+ \
+ __chk_user_ptr(__gu_ptr); \
+ \
+ __enable_user_access(); \
+- __put_user_nocheck(x, __gu_ptr, __pu_err); \
++ __put_user_nocheck(__val, __gu_ptr, __pu_err); \
+ __disable_user_access(); \
+ \
+ __pu_err; \
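
The __put_user() change evaluates the caller's expression into a typed local
before __enable_user_access(), so an @x that calls a function can no longer
execute while user-space protection is lowered. The single-evaluation idiom in
isolation (open_window()/close_window() are hypothetical stand-ins for the
enable/disable pair; __typeof__ is a GCC/Clang extension):

    #include <stdio.h>

    static void open_window(void)  { puts("window open");   }
    static void close_window(void) { puts("window closed"); }

    /* Evaluate 'x' exactly once, before the window opens: any side
     * effects in the caller's expression run with protections intact. */
    #define PUT_IN_WINDOW(x, p)             \
    do {                                    \
        __typeof__(*(p)) __val = (x);       \
        open_window();                      \
        *(p) = __val;                       \
        close_window();                     \
    } while (0)

    static int noisy(void)
    {
        puts("evaluating argument");    /* prints before "window open" */
        return 42;
    }

    int main(void)
    {
        int out;

        PUT_IN_WINDOW(noisy(), &out);
        printf("out=%d\n", out);    /* 42 */
        return 0;
    }
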
+diff --git a/arch/s390/include/asm/vdso/data.h b/arch/s390/include/asm/vdso/data.h
+index 7b3cdb4a5f481..73ee891426662 100644
+--- a/arch/s390/include/asm/vdso/data.h
++++ b/arch/s390/include/asm/vdso/data.h
+@@ -6,7 +6,7 @@
+ #include <vdso/datapage.h>
+
+ struct arch_vdso_data {
+- __u64 tod_steering_delta;
++ __s64 tod_steering_delta;
+ __u64 tod_steering_end;
+ };
+
+diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
+index c59cb44fbb7d7..c71c4c12c9d30 100644
+--- a/arch/s390/kernel/time.c
++++ b/arch/s390/kernel/time.c
+@@ -398,6 +398,7 @@ static void clock_sync_global(unsigned long long delta)
+ tod_steering_delta);
+ tod_steering_end = now + (abs(tod_steering_delta) << 15);
+ vdso_data->arch_data.tod_steering_end = tod_steering_end;
++ vdso_data->arch_data.tod_steering_delta = tod_steering_delta;
+
+ /* Update LPAR offset. */
+ if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
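
Besides actually propagating the delta to the vdso data, the field becomes
__s64 because the steering delta can be negative; read back through an unsigned
field, a negative delta looks like an enormous positive one. A two-line
illustration:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t delta = -5;              /* a negative steering delta */
        uint64_t raw = (uint64_t)delta;  /* what a __u64 field would hold */

        printf("as u64: %" PRIu64 "\n", raw);            /* 2^64 - 5 */
        printf("as s64: %" PRId64 "\n", (int64_t)raw);   /* -5, hence __s64 */
        return 0;
    }
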
+diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
+index c0538f82c9a22..57ef2094af93e 100644
+--- a/arch/x86/include/asm/smp.h
++++ b/arch/x86/include/asm/smp.h
+@@ -132,6 +132,7 @@ void native_play_dead(void);
+ void play_dead_common(void);
+ void wbinvd_on_cpu(int cpu);
+ int wbinvd_on_all_cpus(void);
++bool wakeup_cpu0(void);
+
+ void native_smp_send_reschedule(int cpu);
+ void native_send_call_func_ipi(const struct cpumask *mask);
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
+index 7bdc0239a9435..14cd3186dc77d 100644
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -1554,10 +1554,18 @@ void __init acpi_boot_table_init(void)
+ /*
+ * Initialize the ACPI boot-time table parser.
+ */
+- if (acpi_table_init()) {
++ if (acpi_locate_initial_tables())
+ disable_acpi();
+- return;
+- }
++ else
++ acpi_reserve_initial_tables();
++}
++
++int __init early_acpi_boot_init(void)
++{
++ if (acpi_disabled)
++ return 1;
++
++ acpi_table_init_complete();
+
+ acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
+
+@@ -1570,18 +1578,9 @@ void __init acpi_boot_table_init(void)
+ } else {
+ printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
+ disable_acpi();
+- return;
++ return 1;
+ }
+ }
+-}
+-
+-int __init early_acpi_boot_init(void)
+-{
+- /*
+- * If acpi_disabled, bail out
+- */
+- if (acpi_disabled)
+- return 1;
+
+ /*
+ * Process the Multiple APIC Description Table (MADT), if present
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index 740f3bdb3f619..df964571a6b43 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -1046,6 +1046,9 @@ void __init setup_arch(char **cmdline_p)
+
+ cleanup_highmap();
+
++ /* Look for ACPI tables and reserve memory occupied by them. */
++ acpi_boot_table_init();
++
+ memblock_set_current_limit(ISA_END_ADDRESS);
+ e820__memblock_setup();
+
+@@ -1137,11 +1140,6 @@ void __init setup_arch(char **cmdline_p)
+
+ early_platform_quirks();
+
+- /*
+- * Parse the ACPI tables for possible boot-time SMP configuration.
+- */
+- acpi_boot_table_init();
+-
+ early_acpi_boot_init();
+
+ initmem_init();
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 02813a7f3a7cf..f877150a91da1 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -1659,7 +1659,7 @@ void play_dead_common(void)
+ local_irq_disable();
+ }
+
+-static bool wakeup_cpu0(void)
++bool wakeup_cpu0(void)
+ {
+ if (smp_processor_id() == 0 && enable_start_cpu0)
+ return true;
+diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
+index d36773c7b5359..0b3bf6e2aeb95 100644
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -246,11 +246,18 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
+ return true;
+ }
+
+-static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
++static bool nested_vmcb_check_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
+ {
+ struct kvm_vcpu *vcpu = &svm->vcpu;
+ bool vmcb12_lma;
+
++ /*
++ * FIXME: these should be done after copying the fields,
++ * to avoid TOC/TOU races. For these save area checks
++ * the possible damage is limited since kvm_set_cr0 and
++ * kvm_set_cr4 handle failure; EFER_SVME is an exception
++ * so it is force-set later in nested_prepare_vmcb_save.
++ */
+ if ((vmcb12->save.efer & EFER_SVME) == 0)
+ return false;
+
+@@ -271,7 +278,7 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
+ if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
+ return false;
+
+- return nested_vmcb_check_controls(&vmcb12->control);
++ return true;
+ }
+
+ static void load_nested_vmcb_control(struct vcpu_svm *svm,
+@@ -396,7 +403,14 @@ static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
+ svm->vmcb->save.gdtr = vmcb12->save.gdtr;
+ svm->vmcb->save.idtr = vmcb12->save.idtr;
+ kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
+- svm_set_efer(&svm->vcpu, vmcb12->save.efer);
++
++ /*
++ * Force-set EFER_SVME even though it is checked earlier on the
++ * VMCB12, because the guest can flip the bit between the check
++ * and now. Clearing EFER_SVME would call svm_free_nested.
++ */
++ svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME);
++
+ svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
+ svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
+ svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2;
+@@ -454,7 +468,6 @@ int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
+ int ret;
+
+ svm->nested.vmcb12_gpa = vmcb12_gpa;
+- load_nested_vmcb_control(svm, &vmcb12->control);
+ nested_prepare_vmcb_save(svm, vmcb12);
+ nested_prepare_vmcb_control(svm);
+
+@@ -501,7 +514,10 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
+ if (WARN_ON_ONCE(!svm->nested.initialized))
+ return -EINVAL;
+
+- if (!nested_vmcb_checks(svm, vmcb12)) {
++ load_nested_vmcb_control(svm, &vmcb12->control);
++
++ if (!nested_vmcb_check_save(svm, vmcb12) ||
++ !nested_vmcb_check_controls(&svm->nested.ctl)) {
+ vmcb12->control.exit_code = SVM_EXIT_ERR;
+ vmcb12->control.exit_code_hi = 0;
+ vmcb12->control.exit_info_1 = 0;
+@@ -1207,6 +1223,8 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
+ */
+ if (!(save->cr0 & X86_CR0_PG))
+ goto out_free;
++ if (!(save->efer & EFER_SVME))
++ goto out_free;
+
+ /*
+ * All checks done, we can enter guest mode. L1 control fields
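
The restructured VMRUN path copies the guest's control area into
svm->nested.ctl first and validates that snapshot, so the guest cannot flip a
control bit between check and use (the save-area checks still race, as the
FIXME notes, but failures there are contained). The generic copy-then-validate
shape, sketched:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct ctl_area { unsigned int flags; };
    #define CTL_VALID 0x1u

    /* Snapshot state the other side can write, then check and use only
     * the private copy: the value checked and the value used are
     * guaranteed to be the same one. */
    static bool try_enter(volatile struct ctl_area *shared)
    {
        struct ctl_area snap;

        memcpy(&snap, (const void *)shared, sizeof(snap)); /* copy first */
        if (!(snap.flags & CTL_VALID))                     /* check the copy */
            return false;
        /* ... from here on, use snap and never *shared ... */
        return true;
    }

    int main(void)
    {
        struct ctl_area a = { .flags = CTL_VALID };

        printf("%d\n", try_enter(&a));   /* 1 */
        a.flags = 0;
        printf("%d\n", try_enter(&a));   /* 0 */
        return 0;
    }
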
+diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
+index c426b846beefb..45cc0ae0af6f9 100644
+--- a/arch/xtensa/kernel/coprocessor.S
++++ b/arch/xtensa/kernel/coprocessor.S
+@@ -99,37 +99,6 @@
+ LOAD_CP_REGS_TAB(6)
+ LOAD_CP_REGS_TAB(7)
+
+-/*
+- * coprocessor_flush(struct thread_info*, index)
+- * a2 a3
+- *
+- * Save coprocessor registers for coprocessor 'index'.
+- * The register values are saved to or loaded from the coprocessor area
+- * inside the task_info structure.
+- *
+- * Note that this function doesn't update the coprocessor_owner information!
+- *
+- */
+-
+-ENTRY(coprocessor_flush)
+-
+- /* reserve 4 bytes on stack to save a0 */
+- abi_entry(4)
+-
+- s32i a0, a1, 0
+- movi a0, .Lsave_cp_regs_jump_table
+- addx8 a3, a3, a0
+- l32i a4, a3, 4
+- l32i a3, a3, 0
+- add a2, a2, a4
+- beqz a3, 1f
+- callx0 a3
+-1: l32i a0, a1, 0
+-
+- abi_ret(4)
+-
+-ENDPROC(coprocessor_flush)
+-
+ /*
+ * Entry condition:
+ *
+@@ -245,6 +214,39 @@ ENTRY(fast_coprocessor)
+
+ ENDPROC(fast_coprocessor)
+
++ .text
++
++/*
++ * coprocessor_flush(struct thread_info*, index)
++ * a2 a3
++ *
++ * Save coprocessor registers for coprocessor 'index'.
++ * The register values are saved to or loaded from the coprocessor area
++ * inside the task_info structure.
++ *
++ * Note that this function doesn't update the coprocessor_owner information!
++ *
++ */
++
++ENTRY(coprocessor_flush)
++
++ /* reserve 4 bytes on stack to save a0 */
++ abi_entry(4)
++
++ s32i a0, a1, 0
++ movi a0, .Lsave_cp_regs_jump_table
++ addx8 a3, a3, a0
++ l32i a4, a3, 4
++ l32i a3, a3, 0
++ add a2, a2, a4
++ beqz a3, 1f
++ callx0 a3
++1: l32i a0, a1, 0
++
++ abi_ret(4)
++
++ENDPROC(coprocessor_flush)
++
+ .data
+
+ ENTRY(coprocessor_owner)
+diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
+index 7666408ce12a4..95a74890c7e99 100644
+--- a/arch/xtensa/mm/fault.c
++++ b/arch/xtensa/mm/fault.c
+@@ -112,8 +112,11 @@ good_area:
+ */
+ fault = handle_mm_fault(vma, address, flags, regs);
+
+- if (fault_signal_pending(fault, regs))
++ if (fault_signal_pending(fault, regs)) {
++ if (!user_mode(regs))
++ goto bad_page_fault;
+ return;
++ }
+
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index d93e400940a31..768a6b4d23680 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -29,6 +29,7 @@
+ */
+ #ifdef CONFIG_X86
+ #include <asm/apic.h>
++#include <asm/cpu.h>
+ #endif
+
+ #define _COMPONENT ACPI_PROCESSOR_COMPONENT
+@@ -541,6 +542,12 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
+ wait_for_freeze();
+ } else
+ return -ENODEV;
++
++#if defined(CONFIG_X86) && defined(CONFIG_HOTPLUG_CPU)
++ /* If NMI wants to wake up CPU0, start CPU0. */
++ if (wakeup_cpu0())
++ start_cpu0();
++#endif
+ }
+
+ /* Never reached */
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index a4fdf61b06444..239eeeafc62f6 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -1669,6 +1669,8 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
+ device_initialize(&device->dev);
+ dev_set_uevent_suppress(&device->dev, true);
+ acpi_init_coherency(device);
++ /* Assume there are unmet deps to start with. */
++ device->dep_unmet = 1;
+ }
+
+ void acpi_device_add_finalize(struct acpi_device *device)
+@@ -1934,6 +1936,8 @@ static void acpi_scan_dep_init(struct acpi_device *adev)
+ {
+ struct acpi_dep_data *dep;
+
++ adev->dep_unmet = 0;
++
+ mutex_lock(&acpi_dep_list_lock);
+
+ list_for_each_entry(dep, &acpi_dep_list, node) {
+@@ -1981,7 +1985,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
+ return AE_CTRL_DEPTH;
+
+ acpi_scan_init_hotplug(device);
+- if (!check_dep)
++ /*
++ * If check_dep is true at this point, the device has no dependencies,
++ * or the creation of the device object would have been postponed above.
++ */
++ if (check_dep)
++ device->dep_unmet = 0;
++ else
+ acpi_scan_dep_init(device);
+
+ out:
+diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
+index e48690a006a4e..9d581045acff0 100644
+--- a/drivers/acpi/tables.c
++++ b/drivers/acpi/tables.c
+@@ -780,7 +780,7 @@ acpi_status acpi_os_table_override(struct acpi_table_header *existing_table,
+ }
+
+ /*
+- * acpi_table_init()
++ * acpi_locate_initial_tables()
+ *
+ * find RSDP, find and checksum SDT/XSDT.
+ * checksum all tables, print SDT/XSDT
+@@ -788,7 +788,7 @@ acpi_status acpi_os_table_override(struct acpi_table_header *existing_table,
+ * result: sdt_entry[] is initialized
+ */
+
+-int __init acpi_table_init(void)
++int __init acpi_locate_initial_tables(void)
+ {
+ acpi_status status;
+
+@@ -803,9 +803,45 @@ int __init acpi_table_init(void)
+ status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+- acpi_table_initrd_scan();
+
++ return 0;
++}
++
++void __init acpi_reserve_initial_tables(void)
++{
++ int i;
++
++ for (i = 0; i < ACPI_MAX_TABLES; i++) {
++ struct acpi_table_desc *table_desc = &initial_tables[i];
++ u64 start = table_desc->address;
++ u64 size = table_desc->length;
++
++ if (!start || !size)
++ break;
++
++ pr_info("Reserving %4s table memory at [mem 0x%llx-0x%llx]\n",
++ table_desc->signature.ascii, start, start + size - 1);
++
++ memblock_reserve(start, size);
++ }
++}
++
++void __init acpi_table_init_complete(void)
++{
++ acpi_table_initrd_scan();
+ check_multiple_madt();
++}
++
++int __init acpi_table_init(void)
++{
++ int ret;
++
++ ret = acpi_locate_initial_tables();
++ if (ret)
++ return ret;
++
++ acpi_table_init_complete();
++
+ return 0;
+ }
+
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index 9179825ff646f..e2cf3b29123e8 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -97,6 +97,9 @@ static void deferred_probe_work_func(struct work_struct *work)
+
+ get_device(dev);
+
++ kfree(dev->p->deferred_probe_reason);
++ dev->p->deferred_probe_reason = NULL;
++
+ /*
+ * Drop the mutex while probing each device; the probe path may
+ * manipulate the deferred list
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index 5ef67bacb585e..d6d73ff94e88f 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -1690,8 +1690,8 @@ void pm_runtime_get_suppliers(struct device *dev)
+ device_links_read_lock_held())
+ if (link->flags & DL_FLAG_PM_RUNTIME) {
+ link->supplier_preactivated = true;
+- refcount_inc(&link->rpm_active);
+ pm_runtime_get_sync(link->supplier);
++ refcount_inc(&link->rpm_active);
+ }
+
+ device_links_read_unlock(idx);
+@@ -1704,6 +1704,8 @@ void pm_runtime_get_suppliers(struct device *dev)
+ void pm_runtime_put_suppliers(struct device *dev)
+ {
+ struct device_link *link;
++ unsigned long flags;
++ bool put;
+ int idx;
+
+ idx = device_links_read_lock();
+@@ -1712,7 +1714,11 @@ void pm_runtime_put_suppliers(struct device *dev)
+ device_links_read_lock_held())
+ if (link->supplier_preactivated) {
+ link->supplier_preactivated = false;
+- if (refcount_dec_not_one(&link->rpm_active))
++ spin_lock_irqsave(&dev->power.lock, flags);
++ put = pm_runtime_status_suspended(dev) &&
++ refcount_dec_not_one(&link->rpm_active);
++ spin_unlock_irqrestore(&dev->power.lock, flags);
++ if (put)
+ pm_runtime_put(link->supplier);
+ }
+
+diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
+index 0a6438cbb3f30..e7a9561a826d3 100644
+--- a/drivers/extcon/extcon.c
++++ b/drivers/extcon/extcon.c
+@@ -1241,6 +1241,7 @@ int extcon_dev_register(struct extcon_dev *edev)
+ sizeof(*edev->nh), GFP_KERNEL);
+ if (!edev->nh) {
+ ret = -ENOMEM;
++ device_unregister(&edev->dev);
+ goto err_dev;
+ }
+
+diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
+index 5fd6a60b67410..88ed971e32c0d 100644
+--- a/drivers/firewire/nosy.c
++++ b/drivers/firewire/nosy.c
+@@ -346,6 +346,7 @@ nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ struct client *client = file->private_data;
+ spinlock_t *client_list_lock = &client->lynx->client_list_lock;
+ struct nosy_stats stats;
++ int ret;
+
+ switch (cmd) {
+ case NOSY_IOC_GET_STATS:
+@@ -360,11 +361,15 @@ nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ return 0;
+
+ case NOSY_IOC_START:
++ ret = -EBUSY;
+ spin_lock_irq(client_list_lock);
+- list_add_tail(&client->link, &client->lynx->client_list);
++ if (list_empty(&client->link)) {
++ list_add_tail(&client->link, &client->lynx->client_list);
++ ret = 0;
++ }
+ spin_unlock_irq(client_list_lock);
+
+- return 0;
++ return ret;
+
+ case NOSY_IOC_STOP:
+ spin_lock_irq(client_list_lock);
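
The nosy fix only links the client if it is not already on the list: adding the
same list node twice corrupts the list, so a repeated NOSY_IOC_START now
returns -EBUSY. The guard in miniature, with a self-initialized intrusive list
standing in for the kernel's list_head (a sketch under those assumptions):

    #include <stdbool.h>
    #include <stdio.h>

    /* Node points at itself when unlinked, like INIT_LIST_HEAD(), so
     * "empty" doubles as "not on any list". */
    struct node { struct node *next, *prev; };

    static void node_init(struct node *n) { n->next = n->prev = n; }
    static bool node_unlinked(struct node *n) { return n->next == n; }

    static void add_tail(struct node *n, struct node *head)
    {
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
    }

    /* START-style entry point: a second call reports busy rather than
     * double-linking the node. */
    static int start(struct node *client, struct node *clients)
    {
        if (!node_unlinked(client))
            return -1;   /* -EBUSY in the driver */
        add_tail(client, clients);
        return 0;
    }

    int main(void)
    {
        struct node clients, c;

        node_init(&clients);
        node_init(&c);
        printf("%d\n", start(&c, &clients));   /* 0 */
        printf("%d\n", start(&c, &clients));   /* -1: already started */
        return 0;
    }
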
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index b16b32797624a..37010f290ba4e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -780,9 +780,9 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ dev_info->high_va_offset = AMDGPU_GMC_HOLE_END;
+ dev_info->high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
+ }
+- dev_info->virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
++ dev_info->virtual_address_alignment = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
+ dev_info->pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
+- dev_info->gart_page_size = AMDGPU_GPU_PAGE_SIZE;
++ dev_info->gart_page_size = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
+ dev_info->cu_active_number = adev->gfx.cu_info.number;
+ dev_info->cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
+ dev_info->ce_ram_size = adev->gfx.ce_ram_size;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 0768c86869836..b24cb44739132 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -2195,8 +2195,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+ uint64_t eaddr;
+
+ /* validate the parameters */
+- if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
+- size == 0 || size & AMDGPU_GPU_PAGE_MASK)
++ if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
++ size == 0 || size & ~PAGE_MASK)
+ return -EINVAL;
+
+ /* make sure object fit at this offset */
+@@ -2261,8 +2261,8 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
+ int r;
+
+ /* validate the parameters */
+- if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
+- size == 0 || size & AMDGPU_GPU_PAGE_MASK)
++ if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
++ size == 0 || size & ~PAGE_MASK)
+ return -EINVAL;
+
+ /* make sure object fit at this offset */
+@@ -2407,7 +2407,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
+ after->start = eaddr + 1;
+ after->last = tmp->last;
+ after->offset = tmp->offset;
+- after->offset += after->start - tmp->start;
++ after->offset += (after->start - tmp->start) << PAGE_SHIFT;
+ after->flags = tmp->flags;
+ after->bo_va = tmp->bo_va;
+ list_add(&after->list, &tmp->bo_va->invalids);
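
These amdgpu hunks validate mappings against the CPU page size (PAGE_MASK has
the low PAGE_SHIFT bits clear, so addr & ~PAGE_MASK isolates the misaligned
remainder) and scale the page delta by << PAGE_SHIFT when computing the byte
offset of a split mapping. The mask arithmetic, with illustrative 4 KiB
constants:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1ULL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))   /* ...fffff000 */

    /* addr & ~PAGE_MASK keeps the low 12 bits: zero iff page-aligned */
    static int page_aligned(uint64_t addr)
    {
        return (addr & ~PAGE_MASK) == 0;
    }

    int main(void)
    {
        printf("%d %d\n", page_aligned(0x2000), page_aligned(0x2010)); /* 1 0 */

        /* splitting 3 pages in: offset advances by 3 << PAGE_SHIFT bytes */
        uint64_t page_delta = 3;
        printf("0x%llx\n", (unsigned long long)(page_delta << PAGE_SHIFT)); /* 0x3000 */
        return 0;
    }
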
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+index b258a3dae767f..159add0f5aaae 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+@@ -155,7 +155,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
+
+ /* Wait till CP writes sync code: */
+ status = amdkfd_fence_wait_timeout(
+- (unsigned int *) rm_state,
++ rm_state,
+ QUEUESTATE__ACTIVE, 1500);
+
+ kfd_gtt_sa_free(dbgdev->dev, mem_obj);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index e686ce2bf3b3c..4598a9a581251 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -1167,7 +1167,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
+ if (retval)
+ goto fail_allocate_vidmem;
+
+- dqm->fence_addr = dqm->fence_mem->cpu_ptr;
++ dqm->fence_addr = (uint64_t *)dqm->fence_mem->cpu_ptr;
+ dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
+
+ init_interrupts(dqm);
+@@ -1340,8 +1340,8 @@ out:
+ return retval;
+ }
+
+-int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
+- unsigned int fence_value,
++int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
++ uint64_t fence_value,
+ unsigned int timeout_ms)
+ {
+ unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+index 7351dd195274e..45f8159465544 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+@@ -192,7 +192,7 @@ struct device_queue_manager {
+ uint16_t vmid_pasid[VMID_NUM];
+ uint64_t pipelines_addr;
+ uint64_t fence_gpu_addr;
+- unsigned int *fence_addr;
++ uint64_t *fence_addr;
+ struct kfd_mem_obj *fence_mem;
+ bool active_runlist;
+ int sched_policy;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+index 5d541e0cc8ca2..f71a7fa6680c8 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+@@ -347,7 +347,7 @@ fail_create_runlist_ib:
+ }
+
+ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
+- uint32_t fence_value)
++ uint64_t fence_value)
+ {
+ uint32_t *buffer, size;
+ int retval = 0;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+index dfaf771a42e66..e3ba0cd3b6fa7 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+@@ -283,7 +283,7 @@ static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
+ }
+
+ static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer,
+- uint64_t fence_address, uint32_t fence_value)
++ uint64_t fence_address, uint64_t fence_value)
+ {
+ struct pm4_mes_query_status *packet;
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
+index a852e0d7d804f..08442e7d99440 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
+@@ -263,7 +263,7 @@ static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
+ }
+
+ static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
+- uint64_t fence_address, uint32_t fence_value)
++ uint64_t fence_address, uint64_t fence_value)
+ {
+ struct pm4_mes_query_status *packet;
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 09599efa41fc9..f304d1f8df5f5 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -1003,8 +1003,8 @@ int pqm_get_wave_state(struct process_queue_manager *pqm,
+ u32 *ctl_stack_used_size,
+ u32 *save_area_used_size);
+
+-int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
+- unsigned int fence_value,
++int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
++ uint64_t fence_value,
+ unsigned int timeout_ms);
+
+ /* Packet Manager */
+@@ -1040,7 +1040,7 @@ struct packet_manager_funcs {
+ uint32_t filter_param, bool reset,
+ unsigned int sdma_engine);
+ int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
+- uint64_t fence_address, uint32_t fence_value);
++ uint64_t fence_address, uint64_t fence_value);
+ int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
+
+ /* Packet sizes */
+@@ -1062,7 +1062,7 @@ int pm_send_set_resources(struct packet_manager *pm,
+ struct scheduling_resources *res);
+ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
+ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
+- uint32_t fence_value);
++ uint64_t fence_value);
+
+ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
+ enum kfd_unmap_queues_filter mode,
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+index 72cb67d50e4ae..c9b1437811053 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+@@ -3330,7 +3330,8 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+
+ disable_mclk_switching_for_display = ((1 < hwmgr->display_config->num_display) &&
+ !hwmgr->display_config->multi_monitor_in_sync) ||
+- smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time);
++ (hwmgr->display_config->num_display &&
++ smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time));
+
+ disable_mclk_switching = disable_mclk_switching_for_frame_lock ||
+ disable_mclk_switching_for_display;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+index 92ad2cdbae107..bf6e364192539 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+@@ -388,10 +388,15 @@ static int vangogh_get_allowed_feature_mask(struct smu_context *smu,
+
+ static bool vangogh_is_dpm_running(struct smu_context *smu)
+ {
++ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+ uint32_t feature_mask[2];
+ uint64_t feature_enabled;
+
++ /* we need to re-init after suspend so return false */
++ if (adev->in_suspend)
++ return false;
++
+ ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
+
+ if (ret)
+diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
+index d1a9841adeedf..e6a88c8cbd691 100644
+--- a/drivers/gpu/drm/imx/imx-drm-core.c
++++ b/drivers/gpu/drm/imx/imx-drm-core.c
+@@ -215,7 +215,7 @@ static int imx_drm_bind(struct device *dev)
+
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+- return ret;
++ goto err_kms;
+
+ ret = drm_vblank_init(drm, MAX_CRTC);
+ if (ret)
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
+index f1c9a22083beb..e05565f284dcc 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
+@@ -551,6 +551,10 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
+
+ if (!ttm_dma)
+ return;
++ if (!ttm_dma->pages) {
++ NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
++ return;
++ }
+
+ /* Don't waste time looping if the object is coherent */
+ if (nvbo->force_coherent)
+@@ -583,6 +587,10 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
+
+ if (!ttm_dma)
+ return;
++ if (!ttm_dma->pages) {
++ NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
++ return;
++ }
+
+ /* Don't waste time looping if the object is coherent */
+ if (nvbo->force_coherent)
+diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
+index 0ae3a025efe9d..8eeef5017826e 100644
+--- a/drivers/gpu/drm/tegra/dc.c
++++ b/drivers/gpu/drm/tegra/dc.c
+@@ -2501,22 +2501,18 @@ static int tegra_dc_couple(struct tegra_dc *dc)
+ * POWER_CONTROL registers during CRTC enabling.
+ */
+ if (dc->soc->coupled_pm && dc->pipe == 1) {
+- u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER;
+- struct device_link *link;
+- struct device *partner;
++ struct device *companion;
++ struct tegra_dc *parent;
+
+- partner = driver_find_device(dc->dev->driver, NULL, NULL,
+- tegra_dc_match_by_pipe);
+- if (!partner)
++ companion = driver_find_device(dc->dev->driver, NULL, (const void *)0,
++ tegra_dc_match_by_pipe);
++ if (!companion)
+ return -EPROBE_DEFER;
+
+- link = device_link_add(dc->dev, partner, flags);
+- if (!link) {
+- dev_err(dc->dev, "failed to link controllers\n");
+- return -EINVAL;
+- }
++ parent = dev_get_drvdata(companion);
++ dc->client.parent = &parent->client;
+
+- dev_dbg(dc->dev, "coupled to %s\n", dev_name(partner));
++ dev_dbg(dc->dev, "coupled to %s\n", dev_name(companion));
+ }
+
+ return 0;
+diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
+index f02a035dda453..7b88261f57bb6 100644
+--- a/drivers/gpu/drm/tegra/sor.c
++++ b/drivers/gpu/drm/tegra/sor.c
+@@ -3115,6 +3115,12 @@ static int tegra_sor_init(struct host1x_client *client)
+ * kernel is possible.
+ */
+ if (sor->rst) {
++ err = pm_runtime_resume_and_get(sor->dev);
++ if (err < 0) {
++ dev_err(sor->dev, "failed to get runtime PM: %d\n", err);
++ return err;
++ }
++
+ err = reset_control_acquire(sor->rst);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to acquire SOR reset: %d\n",
+@@ -3148,6 +3154,7 @@ static int tegra_sor_init(struct host1x_client *client)
+ }
+
+ reset_control_release(sor->rst);
++ pm_runtime_put(sor->dev);
+ }
+
+ err = clk_prepare_enable(sor->clk_safe);
+diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
+index 22164300122d5..a2b4463d84802 100644
+--- a/drivers/net/can/Makefile
++++ b/drivers/net/can/Makefile
+@@ -7,12 +7,7 @@ obj-$(CONFIG_CAN_VCAN) += vcan.o
+ obj-$(CONFIG_CAN_VXCAN) += vxcan.o
+ obj-$(CONFIG_CAN_SLCAN) += slcan.o
+
+-obj-$(CONFIG_CAN_DEV) += can-dev.o
+-can-dev-y += dev.o
+-can-dev-y += rx-offload.o
+-
+-can-dev-$(CONFIG_CAN_LEDS) += led.o
+-
++obj-y += dev/
+ obj-y += rcar/
+ obj-y += spi/
+ obj-y += usb/
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+deleted file mode 100644
+index 2a4f12c3c28b0..0000000000000
+--- a/drivers/net/can/dev.c
++++ /dev/null
+@@ -1,1339 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-only
+-/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
+- * Copyright (C) 2006 Andrey Volkov, Varma Electronics
+- * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+- */
+-
+-#include <linux/module.h>
+-#include <linux/kernel.h>
+-#include <linux/slab.h>
+-#include <linux/netdevice.h>
+-#include <linux/if_arp.h>
+-#include <linux/workqueue.h>
+-#include <linux/can.h>
+-#include <linux/can/can-ml.h>
+-#include <linux/can/dev.h>
+-#include <linux/can/skb.h>
+-#include <linux/can/netlink.h>
+-#include <linux/can/led.h>
+-#include <linux/of.h>
+-#include <net/rtnetlink.h>
+-
+-#define MOD_DESC "CAN device driver interface"
+-
+-MODULE_DESCRIPTION(MOD_DESC);
+-MODULE_LICENSE("GPL v2");
+-MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+-
+-/* CAN DLC to real data length conversion helpers */
+-
+-static const u8 dlc2len[] = {0, 1, 2, 3, 4, 5, 6, 7,
+- 8, 12, 16, 20, 24, 32, 48, 64};
+-
+-/* get data length from raw data length code (DLC) */
+-u8 can_fd_dlc2len(u8 dlc)
+-{
+- return dlc2len[dlc & 0x0F];
+-}
+-EXPORT_SYMBOL_GPL(can_fd_dlc2len);
+-
+-static const u8 len2dlc[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, /* 0 - 8 */
+- 9, 9, 9, 9, /* 9 - 12 */
+- 10, 10, 10, 10, /* 13 - 16 */
+- 11, 11, 11, 11, /* 17 - 20 */
+- 12, 12, 12, 12, /* 21 - 24 */
+- 13, 13, 13, 13, 13, 13, 13, 13, /* 25 - 32 */
+- 14, 14, 14, 14, 14, 14, 14, 14, /* 33 - 40 */
+- 14, 14, 14, 14, 14, 14, 14, 14, /* 41 - 48 */
+- 15, 15, 15, 15, 15, 15, 15, 15, /* 49 - 56 */
+- 15, 15, 15, 15, 15, 15, 15, 15}; /* 57 - 64 */
+-
+-/* map the sanitized data length to an appropriate data length code */
+-u8 can_fd_len2dlc(u8 len)
+-{
+- if (unlikely(len > 64))
+- return 0xF;
+-
+- return len2dlc[len];
+-}
+-EXPORT_SYMBOL_GPL(can_fd_len2dlc);
+-
+-#ifdef CONFIG_CAN_CALC_BITTIMING
+-#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
+-
+-/* Bit-timing calculation derived from:
+- *
+- * Code based on LinCAN sources and H8S2638 project
+- * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz
+- * Copyright 2005 Stanislav Marek
+- * email: pisa@cmp.felk.cvut.cz
+- *
+- * Calculates proper bit-timing parameters for a specified bit-rate
+- * and sample-point, which can then be used to set the bit-timing
+- * registers of the CAN controller. You can find more information
+- * in the header file linux/can/netlink.h.
+- */
+-static int
+-can_update_sample_point(const struct can_bittiming_const *btc,
+- unsigned int sample_point_nominal, unsigned int tseg,
+- unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
+- unsigned int *sample_point_error_ptr)
+-{
+- unsigned int sample_point_error, best_sample_point_error = UINT_MAX;
+- unsigned int sample_point, best_sample_point = 0;
+- unsigned int tseg1, tseg2;
+- int i;
+-
+- for (i = 0; i <= 1; i++) {
+- tseg2 = tseg + CAN_SYNC_SEG -
+- (sample_point_nominal * (tseg + CAN_SYNC_SEG)) /
+- 1000 - i;
+- tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
+- tseg1 = tseg - tseg2;
+- if (tseg1 > btc->tseg1_max) {
+- tseg1 = btc->tseg1_max;
+- tseg2 = tseg - tseg1;
+- }
+-
+- sample_point = 1000 * (tseg + CAN_SYNC_SEG - tseg2) /
+- (tseg + CAN_SYNC_SEG);
+- sample_point_error = abs(sample_point_nominal - sample_point);
+-
+- if (sample_point <= sample_point_nominal &&
+- sample_point_error < best_sample_point_error) {
+- best_sample_point = sample_point;
+- best_sample_point_error = sample_point_error;
+- *tseg1_ptr = tseg1;
+- *tseg2_ptr = tseg2;
+- }
+- }
+-
+- if (sample_point_error_ptr)
+- *sample_point_error_ptr = best_sample_point_error;
+-
+- return best_sample_point;
+-}
+-
+-static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
+- const struct can_bittiming_const *btc)
+-{
+- struct can_priv *priv = netdev_priv(dev);
+- unsigned int bitrate; /* current bitrate */
+- unsigned int bitrate_error; /* difference between current and nominal value */
+- unsigned int best_bitrate_error = UINT_MAX;
+- unsigned int sample_point_error; /* difference between current and nominal value */
+- unsigned int best_sample_point_error = UINT_MAX;
+- unsigned int sample_point_nominal; /* nominal sample point */
+- unsigned int best_tseg = 0; /* current best value for tseg */
+- unsigned int best_brp = 0; /* current best value for brp */
+- unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
+- u64 v64;
+-
+- /* Use CiA recommended sample points */
+- if (bt->sample_point) {
+- sample_point_nominal = bt->sample_point;
+- } else {
+- if (bt->bitrate > 800000)
+- sample_point_nominal = 750;
+- else if (bt->bitrate > 500000)
+- sample_point_nominal = 800;
+- else
+- sample_point_nominal = 875;
+- }
+-
+- /* tseg even = round down, odd = round up */
+- for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
+- tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
+- tsegall = CAN_SYNC_SEG + tseg / 2;
+-
+- /* Compute all possible tseg choices (tseg=tseg1+tseg2) */
+- brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
+-
+- /* choose brp step which is possible in system */
+- brp = (brp / btc->brp_inc) * btc->brp_inc;
+- if (brp < btc->brp_min || brp > btc->brp_max)
+- continue;
+-
+- bitrate = priv->clock.freq / (brp * tsegall);
+- bitrate_error = abs(bt->bitrate - bitrate);
+-
+- /* tseg brp biterror */
+- if (bitrate_error > best_bitrate_error)
+- continue;
+-
+- /* reset sample point error if we have a better bitrate */
+- if (bitrate_error < best_bitrate_error)
+- best_sample_point_error = UINT_MAX;
+-
+- can_update_sample_point(btc, sample_point_nominal, tseg / 2,
+- &tseg1, &tseg2, &sample_point_error);
+- if (sample_point_error > best_sample_point_error)
+- continue;
+-
+- best_sample_point_error = sample_point_error;
+- best_bitrate_error = bitrate_error;
+- best_tseg = tseg / 2;
+- best_brp = brp;
+-
+- if (bitrate_error == 0 && sample_point_error == 0)
+- break;
+- }
+-
+- if (best_bitrate_error) {
+- /* Error in one-tenth of a percent */
+- v64 = (u64)best_bitrate_error * 1000;
+- do_div(v64, bt->bitrate);
+- bitrate_error = (u32)v64;
+- if (bitrate_error > CAN_CALC_MAX_ERROR) {
+- netdev_err(dev,
+- "bitrate error %d.%d%% too high\n",
+- bitrate_error / 10, bitrate_error % 10);
+- return -EDOM;
+- }
+- netdev_warn(dev, "bitrate error %d.%d%%\n",
+- bitrate_error / 10, bitrate_error % 10);
+- }
+-
+- /* real sample point */
+- bt->sample_point = can_update_sample_point(btc, sample_point_nominal,
+- best_tseg, &tseg1, &tseg2,
+- NULL);
+-
+- v64 = (u64)best_brp * 1000 * 1000 * 1000;
+- do_div(v64, priv->clock.freq);
+- bt->tq = (u32)v64;
+- bt->prop_seg = tseg1 / 2;
+- bt->phase_seg1 = tseg1 - bt->prop_seg;
+- bt->phase_seg2 = tseg2;
+-
+- /* check for sjw user settings */
+- if (!bt->sjw || !btc->sjw_max) {
+- bt->sjw = 1;
+- } else {
+- /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */
+- if (bt->sjw > btc->sjw_max)
+- bt->sjw = btc->sjw_max;
+- /* bt->sjw must not be higher than tseg2 */
+- if (tseg2 < bt->sjw)
+- bt->sjw = tseg2;
+- }
+-
+- bt->brp = best_brp;
+-
+- /* real bitrate */
+- bt->bitrate = priv->clock.freq /
+- (bt->brp * (CAN_SYNC_SEG + tseg1 + tseg2));
+-
+- return 0;
+-}
+-#else /* !CONFIG_CAN_CALC_BITTIMING */
+-static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
+- const struct can_bittiming_const *btc)
+-{
+- netdev_err(dev, "bit-timing calculation not available\n");
+- return -EINVAL;
+-}
+-#endif /* CONFIG_CAN_CALC_BITTIMING */
+-
+-/* Checks the validity of the specified bit-timing parameters prop_seg,
+- * phase_seg1, phase_seg2 and sjw and tries to determine the bitrate
+- * prescaler value brp. You can find more information in the header
+- * file linux/can/netlink.h.
+- */
+-static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
+- const struct can_bittiming_const *btc)
+-{
+- struct can_priv *priv = netdev_priv(dev);
+- int tseg1, alltseg;
+- u64 brp64;
+-
+- tseg1 = bt->prop_seg + bt->phase_seg1;
+- if (!bt->sjw)
+- bt->sjw = 1;
+- if (bt->sjw > btc->sjw_max ||
+- tseg1 < btc->tseg1_min || tseg1 > btc->tseg1_max ||
+- bt->phase_seg2 < btc->tseg2_min || bt->phase_seg2 > btc->tseg2_max)
+- return -ERANGE;
+-
+- brp64 = (u64)priv->clock.freq * (u64)bt->tq;
+- if (btc->brp_inc > 1)
+- do_div(brp64, btc->brp_inc);
+- brp64 += 500000000UL - 1;
+- do_div(brp64, 1000000000UL); /* the practicable BRP */
+- if (btc->brp_inc > 1)
+- brp64 *= btc->brp_inc;
+- bt->brp = (u32)brp64;
+-
+- if (bt->brp < btc->brp_min || bt->brp > btc->brp_max)
+- return -EINVAL;
+-
+- alltseg = bt->prop_seg + bt->phase_seg1 + bt->phase_seg2 + 1;
+- bt->bitrate = priv->clock.freq / (bt->brp * alltseg);
+- bt->sample_point = ((tseg1 + 1) * 1000) / alltseg;
+-
+- return 0;
+-}
+-
+-/* Checks the validity of predefined bitrate settings */
+-static int
+-can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt,
+- const u32 *bitrate_const,
+- const unsigned int bitrate_const_cnt)
+-{
+- struct can_priv *priv = netdev_priv(dev);
+- unsigned int i;
+-
+- for (i = 0; i < bitrate_const_cnt; i++) {
+- if (bt->bitrate == bitrate_const[i])
+- break;
+- }
+-
+- if (i >= priv->bitrate_const_cnt)
+- return -EINVAL;
+-
+- return 0;
+-}
+-
+-static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
+- const struct can_bittiming_const *btc,
+- const u32 *bitrate_const,
+- const unsigned int bitrate_const_cnt)
+-{
+- int err;
+-
+- /* Depending on the given can_bittiming parameter structure the CAN
+- * timing parameters are calculated based on the provided bitrate OR
+- * alternatively the CAN timing parameters (tq, prop_seg, etc.) are
+- * provided directly which are then checked and fixed up.
+- */
+- if (!bt->tq && bt->bitrate && btc)
+- err = can_calc_bittiming(dev, bt, btc);
+- else if (bt->tq && !bt->bitrate && btc)
+- err = can_fixup_bittiming(dev, bt, btc);
+- else if (!bt->tq && bt->bitrate && bitrate_const)
+- err = can_validate_bitrate(dev, bt, bitrate_const,
+- bitrate_const_cnt);
+- else
+- err = -EINVAL;
+-
+- return err;
+-}
+-
+-static void can_update_state_error_stats(struct net_device *dev,
+- enum can_state new_state)
+-{
+- struct can_priv *priv = netdev_priv(dev);
+-
+- if (new_state <= priv->state)
+- return;
+-
+- switch (new_state) {
+- case CAN_STATE_ERROR_WARNING:
+- priv->can_stats.error_warning++;
+- break;
+- case CAN_STATE_ERROR_PASSIVE:
+- priv->can_stats.error_passive++;
+- break;
+- case CAN_STATE_BUS_OFF:
+- priv->can_stats.bus_off++;
+- break;
+- default:
+- break;
+- }
+-}
+-
+-static int can_tx_state_to_frame(struct net_device *dev, enum can_state state)
+-{
+- switch (state) {
+- case CAN_STATE_ERROR_ACTIVE:
+- return CAN_ERR_CRTL_ACTIVE;
+- case CAN_STATE_ERROR_WARNING:
+- return CAN_ERR_CRTL_TX_WARNING;
+- case CAN_STATE_ERROR_PASSIVE:
+- return CAN_ERR_CRTL_TX_PASSIVE;
+- default:
+- return 0;
+- }
+-}
+-
+-static int can_rx_state_to_frame(struct net_device *dev, enum can_state state)
+-{
+- switch (state) {
+- case CAN_STATE_ERROR_ACTIVE:
+- return CAN_ERR_CRTL_ACTIVE;
+- case CAN_STATE_ERROR_WARNING:
+- return CAN_ERR_CRTL_RX_WARNING;
+- case CAN_STATE_ERROR_PASSIVE:
+- return CAN_ERR_CRTL_RX_PASSIVE;
+- default:
+- return 0;
+- }
+-}
+-
+-static const char *can_get_state_str(const enum can_state state)
+-{
+- switch (state) {
+- case CAN_STATE_ERROR_ACTIVE:
+- return "Error Active";
+- case CAN_STATE_ERROR_WARNING:
+- return "Error Warning";
+- case CAN_STATE_ERROR_PASSIVE:
+- return "Error Passive";
+- case CAN_STATE_BUS_OFF:
+- return "Bus Off";
+- case CAN_STATE_STOPPED:
+- return "Stopped";
+- case CAN_STATE_SLEEPING:
+- return "Sleeping";
+- default:
+- return "<unknown>";
+- }
+-
+- return "<unknown>";
+-}
+-
+-void can_change_state(struct net_device *dev, struct can_frame *cf,
+- enum can_state tx_state, enum can_state rx_state)
+-{
+- struct can_priv *priv = netdev_priv(dev);
+- enum can_state new_state = max(tx_state, rx_state);
+-
+- if (unlikely(new_state == priv->state)) {
+- netdev_warn(dev, "%s: oops, state did not change", __func__);
+- return;
+- }
+-
+- netdev_dbg(dev, "Controller changed from %s State (%d) into %s State (%d).\n",
+- can_get_state_str(priv->state), priv->state,
+- can_get_state_str(new_state), new_state);
+-
+- can_update_state_error_stats(dev, new_state);
+- priv->state = new_state;
+-
+- if (!cf)
+- return;
+-
+- if (unlikely(new_state == CAN_STATE_BUS_OFF)) {
+- cf->can_id |= CAN_ERR_BUSOFF;
+- return;
+- }
+-
+- cf->can_id |= CAN_ERR_CRTL;
+- cf->data[1] |= tx_state >= rx_state ?
+- can_tx_state_to_frame(dev, tx_state) : 0;
+- cf->data[1] |= tx_state <= rx_state ?
+- can_rx_state_to_frame(dev, rx_state) : 0;
+-}
+-EXPORT_SYMBOL_GPL(can_change_state);
+-
+-/* Local echo of CAN messages
+- *
+- * CAN network devices *should* support a local echo functionality
+- * (see Documentation/networking/can.rst). To test the handling of CAN
+- * interfaces that do not support the local echo both driver types are
+- * implemented. In the case that the driver does not support the echo
+- * the IFF_ECHO remains clear in dev->flags. This causes the PF_CAN core
+- * to perform the echo as a fallback solution.
+- */
+-static void can_flush_echo_skb(struct net_device *dev)
+-{
+- struct can_priv *priv = netdev_priv(dev);
+- struct net_device_stats *stats = &dev->stats;
+- int i;
+-
+- for (i = 0; i < priv->echo_skb_max; i++) {
+- if (priv->echo_skb[i]) {
+- kfree_skb(priv->echo_skb[i]);
+- priv->echo_skb[i] = NULL;
+- stats->tx_dropped++;
+- stats->tx_aborted_errors++;
+- }
+- }
+-}
+-
+-/* Put the skb on the stack to be looped backed locally lateron
+- *
+- * The function is typically called in the start_xmit function
+- * of the device driver. The driver must protect access to
+- * priv->echo_skb, if necessary.
+- */
+-int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
+- unsigned int idx)
+-{
+- struct can_priv *priv = netdev_priv(dev);
+-
+- BUG_ON(idx >= priv->echo_skb_max);
+-
+- /* check flag whether this packet has to be looped back */
+- if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK ||
+- (skb->protocol != htons(ETH_P_CAN) &&
+- skb->protocol != htons(ETH_P_CANFD))) {
+- kfree_skb(skb);
+- return 0;
+- }
+-
+- if (!priv->echo_skb[idx]) {
+- skb = can_create_echo_skb(skb);
+- if (!skb)
+- return -ENOMEM;
+-
+- /* make settings for echo to reduce code in irq context */
+- skb->pkt_type = PACKET_BROADCAST;
+- skb->ip_summed = CHECKSUM_UNNECESSARY;
+- skb->dev = dev;
+-
+- /* save this skb for tx interrupt echo handling */
+- priv->echo_skb[idx] = skb;
+- } else {
+- /* locking problem with netif_stop_queue() ?? */
+- netdev_err(dev, "%s: BUG! echo_skb %d is occupied!\n", __func__, idx);
+- kfree_skb(skb);
+- return -EBUSY;
+- }
+-
+- return 0;
+-}
+-EXPORT_SYMBOL_GPL(can_put_echo_skb);
+-
+-struct sk_buff *
+-__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
+-{
+- struct can_priv *priv = netdev_priv(dev);
+-
+- if (idx >= priv->echo_skb_max) {
+- netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
+- __func__, idx, priv->echo_skb_max);
+- return NULL;
+- }
+-
+- if (priv->echo_skb[idx]) {
+- /* Using "struct canfd_frame::len" for the frame
+- * length is supported on both CAN and CANFD frames.
+- */
+- struct sk_buff *skb = priv->echo_skb[idx];
+- struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+-
+- /* get the real payload length for netdev statistics */
+- if (cf->can_id & CAN_RTR_FLAG)
+- *len_ptr = 0;
+- else
+- *len_ptr = cf->len;
+-
+- priv->echo_skb[idx] = NULL;
+-
+- return skb;
+- }
+-
+- return NULL;
+-}
+-
+-/* Get the skb from the stack and loop it back locally
+- *
+- * The function is typically called when the TX done interrupt
+- * is handled in the device driver. The driver must protect
+- * access to priv->echo_skb, if necessary.
+- */
+-unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
+-{
+- struct sk_buff *skb;
+- u8 len;
+-
+- skb = __can_get_echo_skb(dev, idx, &len);
+- if (!skb)
+- return 0;
+-
+- skb_get(skb);
+- if (netif_rx(skb) == NET_RX_SUCCESS)
+- dev_consume_skb_any(skb);
+- else
+- dev_kfree_skb_any(skb);
+-
+- return len;
+-}
+-EXPORT_SYMBOL_GPL(can_get_echo_skb);
+-
+-/* Remove the skb from the stack and free it.
+- *
+- * The function is typically called when TX failed.
+- */
+-void can_free_echo_skb(struct net_device *dev, unsigned int idx)
+-{
+- struct can_priv *priv = netdev_priv(dev);
+-
+- BUG_ON(idx >= priv->echo_skb_max);
+-
+- if (priv->echo_skb[idx]) {
+- dev_kfree_skb_any(priv->echo_skb[idx]);
+- priv->echo_skb[idx] = NULL;
+- }
+-}
+-EXPORT_SYMBOL_GPL(can_free_echo_skb);
+-
+-/* CAN device restart for bus-off recovery */
+-static void can_restart(struct net_device *dev)
+-{
+- struct can_priv *priv = netdev_priv(dev);
+- struct net_device_stats *stats = &dev->stats;
+- struct sk_buff *skb;
+- struct can_frame *cf;
+- int err;
+-
+- BUG_ON(netif_carrier_ok(dev));
+-
+- /* No synchronization needed because the device is bus-off and
+- * no messages can come in or go out.
+- */
+- can_flush_echo_skb(dev);
+-
+- /* send restart message upstream */
+- skb = alloc_can_err_skb(dev, &cf);
+- if (!skb)
+- goto restart;
+-
+- cf->can_id |= CAN_ERR_RESTARTED;
+-
+- stats->rx_packets++;
+- stats->rx_bytes += cf->len;
+-
+- netif_rx_ni(skb);
+-
+-restart:
+- netdev_dbg(dev, "restarted\n");
+- priv->can_stats.restarts++;
+-
+- /* Now restart the device */
+- err = priv->do_set_mode(dev, CAN_MODE_START);
+-
+- netif_carrier_on(dev);
+- if (err)
+- netdev_err(dev, "Error %d during restart", err);
+-}
+-
+-static void can_restart_work(struct work_struct *work)
+-{
+- struct delayed_work *dwork = to_delayed_work(work);
+- struct can_priv *priv = container_of(dwork, struct can_priv,
+- restart_work);
+-
+- can_restart(priv->dev);
+-}
+-
+-int can_restart_now(struct net_device *dev)
+-{
+- struct can_priv *priv = netdev_priv(dev);
+-
+- /* A manual restart is only permitted if automatic restart is
+- * disabled and the device is in the bus-off state
+- */
+- if (priv->restart_ms)
+- return -EINVAL;
+- if (priv->state != CAN_STATE_BUS_OFF)
+- return -EBUSY;
+-
+- cancel_delayed_work_sync(&priv->restart_work);
+- can_restart(dev);
+-
+- return 0;
+-}
+-
+-/* CAN bus-off
+- *
+- * This functions should be called when the device goes bus-off to
+- * tell the netif layer that no more packets can be sent or received.
+- * If enabled, a timer is started to trigger bus-off recovery.
+- */
+-void can_bus_off(struct net_device *dev)
+-{
+- struct can_priv *priv = netdev_priv(dev);
+-
+- if (priv->restart_ms)
+- netdev_info(dev, "bus-off, scheduling restart in %d ms\n",
+- priv->restart_ms);
+- else
+- netdev_info(dev, "bus-off\n");
+-
+- netif_carrier_off(dev);
+-
+- if (priv->restart_ms)
+- schedule_delayed_work(&priv->restart_work,
+- msecs_to_jiffies(priv->restart_ms));
+-}
+-EXPORT_SYMBOL_GPL(can_bus_off);
+-
+-static void can_setup(struct net_device *dev)
+-{
+- dev->type = ARPHRD_CAN;
+- dev->mtu = CAN_MTU;
+- dev->hard_header_len = 0;
+- dev->addr_len = 0;
+- dev->tx_queue_len = 10;
+-
+- /* New-style flags. */
+- dev->flags = IFF_NOARP;
+- dev->features = NETIF_F_HW_CSUM;
+-}
+-
+-struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
+-{
+- struct sk_buff *skb;
+-
+- skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
+- sizeof(struct can_frame));
+- if (unlikely(!skb))
+- return NULL;
+-
+- skb->protocol = htons(ETH_P_CAN);
+- skb->pkt_type = PACKET_BROADCAST;
+- skb->ip_summed = CHECKSUM_UNNECESSARY;
+-
+- skb_reset_mac_header(skb);
+- skb_reset_network_header(skb);
+- skb_reset_transport_header(skb);
+-
+- can_skb_reserve(skb);
+- can_skb_prv(skb)->ifindex = dev->ifindex;
+- can_skb_prv(skb)->skbcnt = 0;
+-
+- *cf = skb_put_zero(skb, sizeof(struct can_frame));
+-
+- return skb;
+-}
+-EXPORT_SYMBOL_GPL(alloc_can_skb);
+-
+-struct sk_buff *alloc_canfd_skb(struct net_device *dev,
+- struct canfd_frame **cfd)
+-{
+- struct sk_buff *skb;
+-
+- skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
+- sizeof(struct canfd_frame));
+- if (unlikely(!skb))
+- return NULL;
+-
+- skb->protocol = htons(ETH_P_CANFD);
+- skb->pkt_type = PACKET_BROADCAST;
+- skb->ip_summed = CHECKSUM_UNNECESSARY;
+-
+- skb_reset_mac_header(skb);
+- skb_reset_network_header(skb);
+- skb_reset_transport_header(skb);
+-
+- can_skb_reserve(skb);
+- can_skb_prv(skb)->ifindex = dev->ifindex;
+- can_skb_prv(skb)->skbcnt = 0;
+-
+- *cfd = skb_put_zero(skb, sizeof(struct canfd_frame));
+-
+- return skb;
+-}
+-EXPORT_SYMBOL_GPL(alloc_canfd_skb);
+-
+-struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
+-{
+- struct sk_buff *skb;
+-
+- skb = alloc_can_skb(dev, cf);
+- if (unlikely(!skb))
+- return NULL;
+-
+- (*cf)->can_id = CAN_ERR_FLAG;
+- (*cf)->len = CAN_ERR_DLC;
+-
+- return skb;
+-}
+-EXPORT_SYMBOL_GPL(alloc_can_err_skb);
+-
+-/* Allocate and setup space for the CAN network device */
+-struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
+- unsigned int txqs, unsigned int rxqs)
+-{
+- struct net_device *dev;
+- struct can_priv *priv;
+- int size;
+-
+- /* We put the driver's priv, the CAN mid layer priv and the
+- * echo skb into the netdevice's priv. The memory layout for
+- * the netdev_priv is like this:
+- *
+- * +-------------------------+
+- * | driver's priv |
+- * +-------------------------+
+- * | struct can_ml_priv |
+- * +-------------------------+
+- * | array of struct sk_buff |
+- * +-------------------------+
+- */
+-
+- size = ALIGN(sizeof_priv, NETDEV_ALIGN) + sizeof(struct can_ml_priv);
+-
+- if (echo_skb_max)
+- size = ALIGN(size, sizeof(struct sk_buff *)) +
+- echo_skb_max * sizeof(struct sk_buff *);
+-
+- dev = alloc_netdev_mqs(size, "can%d", NET_NAME_UNKNOWN, can_setup,
+- txqs, rxqs);
+- if (!dev)
+- return NULL;
+-
+- priv = netdev_priv(dev);
+- priv->dev = dev;
+-
+- dev->ml_priv = (void *)priv + ALIGN(sizeof_priv, NETDEV_ALIGN);
+-
+- if (echo_skb_max) {
+- priv->echo_skb_max = echo_skb_max;
+- priv->echo_skb = (void *)priv +
+- (size - echo_skb_max * sizeof(struct sk_buff *));
+- }
+-
+- priv->state = CAN_STATE_STOPPED;
+-
+- INIT_DELAYED_WORK(&priv->restart_work, can_restart_work);
+-
+- return dev;
+-}
+-EXPORT_SYMBOL_GPL(alloc_candev_mqs);
+-
+-/* Free space of the CAN network device */
+-void free_candev(struct net_device *dev)
+-{
+- free_netdev(dev);
+-}
+-EXPORT_SYMBOL_GPL(free_candev);
+-
+-/* changing MTU and control mode for CAN/CANFD devices */
+-int can_change_mtu(struct net_device *dev, int new_mtu)
+-{
+- struct can_priv *priv = netdev_priv(dev);
+-
+- /* Do not allow changing the MTU while running */
+- if (dev->flags & IFF_UP)
+- return -EBUSY;
+-
+- /* allow change of MTU according to the CANFD ability of the device */
+- switch (new_mtu) {
+- case CAN_MTU:
+- /* 'CANFD-only' controllers can not switch to CAN_MTU */
+- if (priv->ctrlmode_static & CAN_CTRLMODE_FD)
+- return -EINVAL;
+-
+- priv->ctrlmode &= ~CAN_CTRLMODE_FD;
+- break;
+-
+- case CANFD_MTU:
+- /* check for potential CANFD ability */
+- if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) &&
+- !(priv->ctrlmode_static & CAN_CTRLMODE_FD))
+- return -EINVAL;
+-
+- priv->ctrlmode |= CAN_CTRLMODE_FD;
+- break;
+-
+- default:
+- return -EINVAL;
+- }
+-
+- dev->mtu = new_mtu;
+- return 0;
+-}
+-EXPORT_SYMBOL_GPL(can_change_mtu);
+-
+-/* Common open function when the device gets opened.
+- *
+- * This function should be called in the open function of the device
+- * driver.
+- */
+-int open_candev(struct net_device *dev)
+-{
+- struct can_priv *priv = netdev_priv(dev);
+-
+- if (!priv->bittiming.bitrate) {
+- netdev_err(dev, "bit-timing not yet defined\n");
+- return -EINVAL;
+- }
+-
+- /* For CAN FD the data bitrate has to be >= the arbitration bitrate */
+- if ((priv->ctrlmode & CAN_CTRLMODE_FD) &&
+- (!priv->data_bittiming.bitrate ||
+- priv->data_bittiming.bitrate < priv->bittiming.bitrate)) {
+- netdev_err(dev, "incorrect/missing data bit-timing\n");
+- return -EINVAL;
+- }
+-
+- /* Switch carrier on if device was stopped while in bus-off state */
+- if (!netif_carrier_ok(dev))
+- netif_carrier_on(dev);
+-
+- return 0;
+-}
+-EXPORT_SYMBOL_GPL(open_candev);
+-
+-#ifdef CONFIG_OF
+-/* Common function that can be used to determine the limitations of
+- * a transceiver when it provides no means to report them at runtime.
+- */
+-void of_can_transceiver(struct net_device *dev)
+-{
+- struct device_node *dn;
+- struct can_priv *priv = netdev_priv(dev);
+- struct device_node *np = dev->dev.parent->of_node;
+- int ret;
+-
+- dn = of_get_child_by_name(np, "can-transceiver");
+- if (!dn)
+- return;
+-
+- ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max);
+- of_node_put(dn);
+- if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max))
+- netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n");
+-}
+-EXPORT_SYMBOL_GPL(of_can_transceiver);
+-#endif
+-
+-/* Common close function for cleanup before the device gets closed.
+- *
+- * This function should be called in the close function of the device
+- * driver.
+- */
+-void close_candev(struct net_device *dev)
+-{
+- struct can_priv *priv = netdev_priv(dev);
+-
+- cancel_delayed_work_sync(&priv->restart_work);
+- can_flush_echo_skb(dev);
+-}
+-EXPORT_SYMBOL_GPL(close_candev);
+-
+-/* CAN netlink interface */
+-static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
+- [IFLA_CAN_STATE] = { .type = NLA_U32 },
+- [IFLA_CAN_CTRLMODE] = { .len = sizeof(struct can_ctrlmode) },
+- [IFLA_CAN_RESTART_MS] = { .type = NLA_U32 },
+- [IFLA_CAN_RESTART] = { .type = NLA_U32 },
+- [IFLA_CAN_BITTIMING] = { .len = sizeof(struct can_bittiming) },
+- [IFLA_CAN_BITTIMING_CONST]
+- = { .len = sizeof(struct can_bittiming_const) },
+- [IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) },
+- [IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) },
+- [IFLA_CAN_DATA_BITTIMING]
+- = { .len = sizeof(struct can_bittiming) },
+- [IFLA_CAN_DATA_BITTIMING_CONST]
+- = { .len = sizeof(struct can_bittiming_const) },
+- [IFLA_CAN_TERMINATION] = { .type = NLA_U16 },
+-};
+-
+-static int can_validate(struct nlattr *tb[], struct nlattr *data[],
+- struct netlink_ext_ack *extack)
+-{
+- bool is_can_fd = false;
+-
+- /* Make sure that valid CAN FD configurations always consist of
+- * - nominal/arbitration bittiming
+- * - data bittiming
+- * - control mode with CAN_CTRLMODE_FD set
+- */
+-
+- if (!data)
+- return 0;
+-
+- if (data[IFLA_CAN_CTRLMODE]) {
+- struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+-
+- is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
+- }
+-
+- if (is_can_fd) {
+- if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
+- return -EOPNOTSUPP;
+- }
+-
+- if (data[IFLA_CAN_DATA_BITTIMING]) {
+- if (!is_can_fd || !data[IFLA_CAN_BITTIMING])
+- return -EOPNOTSUPP;
+- }
+-
+- return 0;
+-}
+-
+-static int can_changelink(struct net_device *dev, struct nlattr *tb[],
+- struct nlattr *data[],
+- struct netlink_ext_ack *extack)
+-{
+- struct can_priv *priv = netdev_priv(dev);
+- int err;
+-
+- /* We need synchronization with dev->stop() */
+- ASSERT_RTNL();
+-
+- if (data[IFLA_CAN_BITTIMING]) {
+- struct can_bittiming bt;
+-
+- /* Do not allow changing bittiming while running */
+- if (dev->flags & IFF_UP)
+- return -EBUSY;
+-
+- /* Calculate bittiming parameters based on
+- * bittiming_const if set, otherwise pass bitrate
+-		 * directly via do_set_bittiming(). Bail out if neither
+- * is given.
+- */
+- if (!priv->bittiming_const && !priv->do_set_bittiming)
+- return -EOPNOTSUPP;
+-
+- memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
+- err = can_get_bittiming(dev, &bt,
+- priv->bittiming_const,
+- priv->bitrate_const,
+- priv->bitrate_const_cnt);
+- if (err)
+- return err;
+-
+- if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) {
+- netdev_err(dev, "arbitration bitrate surpasses transceiver capabilities of %d bps\n",
+- priv->bitrate_max);
+- return -EINVAL;
+- }
+-
+- memcpy(&priv->bittiming, &bt, sizeof(bt));
+-
+- if (priv->do_set_bittiming) {
+- /* Finally, set the bit-timing registers */
+- err = priv->do_set_bittiming(dev);
+- if (err)
+- return err;
+- }
+- }
+-
+- if (data[IFLA_CAN_CTRLMODE]) {
+- struct can_ctrlmode *cm;
+- u32 ctrlstatic;
+- u32 maskedflags;
+-
+- /* Do not allow changing controller mode while running */
+- if (dev->flags & IFF_UP)
+- return -EBUSY;
+- cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+- ctrlstatic = priv->ctrlmode_static;
+- maskedflags = cm->flags & cm->mask;
+-
+- /* check whether provided bits are allowed to be passed */
+- if (cm->mask & ~(priv->ctrlmode_supported | ctrlstatic))
+- return -EOPNOTSUPP;
+-
+- /* do not check for static fd-non-iso if 'fd' is disabled */
+- if (!(maskedflags & CAN_CTRLMODE_FD))
+- ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
+-
+- /* make sure static options are provided by configuration */
+- if ((maskedflags & ctrlstatic) != ctrlstatic)
+- return -EOPNOTSUPP;
+-
+- /* clear bits to be modified and copy the flag values */
+- priv->ctrlmode &= ~cm->mask;
+- priv->ctrlmode |= maskedflags;
+-
+- /* CAN_CTRLMODE_FD can only be set when driver supports FD */
+- if (priv->ctrlmode & CAN_CTRLMODE_FD)
+- dev->mtu = CANFD_MTU;
+- else
+- dev->mtu = CAN_MTU;
+- }
+-
+- if (data[IFLA_CAN_RESTART_MS]) {
+- /* Do not allow changing restart delay while running */
+- if (dev->flags & IFF_UP)
+- return -EBUSY;
+- priv->restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]);
+- }
+-
+- if (data[IFLA_CAN_RESTART]) {
+- /* Do not allow a restart while not running */
+- if (!(dev->flags & IFF_UP))
+- return -EINVAL;
+- err = can_restart_now(dev);
+- if (err)
+- return err;
+- }
+-
+- if (data[IFLA_CAN_DATA_BITTIMING]) {
+- struct can_bittiming dbt;
+-
+- /* Do not allow changing bittiming while running */
+- if (dev->flags & IFF_UP)
+- return -EBUSY;
+-
+- /* Calculate bittiming parameters based on
+- * data_bittiming_const if set, otherwise pass bitrate
+-		 * directly via do_set_data_bittiming(). Bail out if neither
+- * is given.
+- */
+- if (!priv->data_bittiming_const && !priv->do_set_data_bittiming)
+- return -EOPNOTSUPP;
+-
+- memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
+- sizeof(dbt));
+- err = can_get_bittiming(dev, &dbt,
+- priv->data_bittiming_const,
+- priv->data_bitrate_const,
+- priv->data_bitrate_const_cnt);
+- if (err)
+- return err;
+-
+- if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
+- netdev_err(dev, "canfd data bitrate surpasses transceiver capabilities of %d bps\n",
+- priv->bitrate_max);
+- return -EINVAL;
+- }
+-
+- memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
+-
+- if (priv->do_set_data_bittiming) {
+- /* Finally, set the bit-timing registers */
+- err = priv->do_set_data_bittiming(dev);
+- if (err)
+- return err;
+- }
+- }
+-
+- if (data[IFLA_CAN_TERMINATION]) {
+- const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]);
+- const unsigned int num_term = priv->termination_const_cnt;
+- unsigned int i;
+-
+- if (!priv->do_set_termination)
+- return -EOPNOTSUPP;
+-
+- /* check whether given value is supported by the interface */
+- for (i = 0; i < num_term; i++) {
+- if (termval == priv->termination_const[i])
+- break;
+- }
+- if (i >= num_term)
+- return -EINVAL;
+-
+- /* Finally, set the termination value */
+- err = priv->do_set_termination(dev, termval);
+- if (err)
+- return err;
+-
+- priv->termination = termval;
+- }
+-
+- return 0;
+-}
+-
+-static size_t can_get_size(const struct net_device *dev)
+-{
+- struct can_priv *priv = netdev_priv(dev);
+- size_t size = 0;
+-
+- if (priv->bittiming.bitrate) /* IFLA_CAN_BITTIMING */
+- size += nla_total_size(sizeof(struct can_bittiming));
+- if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
+- size += nla_total_size(sizeof(struct can_bittiming_const));
+- size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */
+- size += nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */
+- size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */
+- size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
+- if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
+- size += nla_total_size(sizeof(struct can_berr_counter));
+- if (priv->data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */
+- size += nla_total_size(sizeof(struct can_bittiming));
+- if (priv->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */
+- size += nla_total_size(sizeof(struct can_bittiming_const));
+- if (priv->termination_const) {
+- size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */
+- size += nla_total_size(sizeof(*priv->termination_const) * /* IFLA_CAN_TERMINATION_CONST */
+- priv->termination_const_cnt);
+- }
+- if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */
+- size += nla_total_size(sizeof(*priv->bitrate_const) *
+- priv->bitrate_const_cnt);
+- if (priv->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */
+- size += nla_total_size(sizeof(*priv->data_bitrate_const) *
+- priv->data_bitrate_const_cnt);
+-	size += nla_total_size(sizeof(priv->bitrate_max));	/* IFLA_CAN_BITRATE_MAX */
+-
+- return size;
+-}
+-
+-static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
+-{
+- struct can_priv *priv = netdev_priv(dev);
+- struct can_ctrlmode cm = {.flags = priv->ctrlmode};
+- struct can_berr_counter bec = { };
+- enum can_state state = priv->state;
+-
+- if (priv->do_get_state)
+- priv->do_get_state(dev, &state);
+-
+- if ((priv->bittiming.bitrate &&
+- nla_put(skb, IFLA_CAN_BITTIMING,
+- sizeof(priv->bittiming), &priv->bittiming)) ||
+-
+- (priv->bittiming_const &&
+- nla_put(skb, IFLA_CAN_BITTIMING_CONST,
+- sizeof(*priv->bittiming_const), priv->bittiming_const)) ||
+-
+- nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock) ||
+- nla_put_u32(skb, IFLA_CAN_STATE, state) ||
+- nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
+- nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
+-
+- (priv->do_get_berr_counter &&
+- !priv->do_get_berr_counter(dev, &bec) &&
+- nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
+-
+- (priv->data_bittiming.bitrate &&
+- nla_put(skb, IFLA_CAN_DATA_BITTIMING,
+- sizeof(priv->data_bittiming), &priv->data_bittiming)) ||
+-
+- (priv->data_bittiming_const &&
+- nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
+- sizeof(*priv->data_bittiming_const),
+- priv->data_bittiming_const)) ||
+-
+- (priv->termination_const &&
+- (nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) ||
+- nla_put(skb, IFLA_CAN_TERMINATION_CONST,
+- sizeof(*priv->termination_const) *
+- priv->termination_const_cnt,
+- priv->termination_const))) ||
+-
+- (priv->bitrate_const &&
+- nla_put(skb, IFLA_CAN_BITRATE_CONST,
+- sizeof(*priv->bitrate_const) *
+- priv->bitrate_const_cnt,
+- priv->bitrate_const)) ||
+-
+- (priv->data_bitrate_const &&
+- nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST,
+- sizeof(*priv->data_bitrate_const) *
+- priv->data_bitrate_const_cnt,
+- priv->data_bitrate_const)) ||
+-
+- (nla_put(skb, IFLA_CAN_BITRATE_MAX,
+- sizeof(priv->bitrate_max),
+- &priv->bitrate_max))
+- )
+-
+- return -EMSGSIZE;
+-
+- return 0;
+-}
+-
+-static size_t can_get_xstats_size(const struct net_device *dev)
+-{
+- return sizeof(struct can_device_stats);
+-}
+-
+-static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev)
+-{
+- struct can_priv *priv = netdev_priv(dev);
+-
+- if (nla_put(skb, IFLA_INFO_XSTATS,
+- sizeof(priv->can_stats), &priv->can_stats))
+- goto nla_put_failure;
+- return 0;
+-
+-nla_put_failure:
+- return -EMSGSIZE;
+-}
+-
+-static int can_newlink(struct net *src_net, struct net_device *dev,
+- struct nlattr *tb[], struct nlattr *data[],
+- struct netlink_ext_ack *extack)
+-{
+- return -EOPNOTSUPP;
+-}
+-
+-static void can_dellink(struct net_device *dev, struct list_head *head)
+-{
+-}
+-
+-static struct rtnl_link_ops can_link_ops __read_mostly = {
+- .kind = "can",
+- .netns_refund = true,
+- .maxtype = IFLA_CAN_MAX,
+- .policy = can_policy,
+- .setup = can_setup,
+- .validate = can_validate,
+- .newlink = can_newlink,
+- .changelink = can_changelink,
+- .dellink = can_dellink,
+- .get_size = can_get_size,
+- .fill_info = can_fill_info,
+- .get_xstats_size = can_get_xstats_size,
+- .fill_xstats = can_fill_xstats,
+-};
+-
+-/* Register the CAN network device */
+-int register_candev(struct net_device *dev)
+-{
+- struct can_priv *priv = netdev_priv(dev);
+-
+- /* Ensure termination_const, termination_const_cnt and
+- * do_set_termination consistency. All must be either set or
+- * unset.
+- */
+- if ((!priv->termination_const != !priv->termination_const_cnt) ||
+- (!priv->termination_const != !priv->do_set_termination))
+- return -EINVAL;
+-
+- if (!priv->bitrate_const != !priv->bitrate_const_cnt)
+- return -EINVAL;
+-
+- if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt)
+- return -EINVAL;
+-
+- dev->rtnl_link_ops = &can_link_ops;
+- netif_carrier_off(dev);
+-
+- return register_netdev(dev);
+-}
+-EXPORT_SYMBOL_GPL(register_candev);
+-
+-/* Unregister the CAN network device */
+-void unregister_candev(struct net_device *dev)
+-{
+- unregister_netdev(dev);
+-}
+-EXPORT_SYMBOL_GPL(unregister_candev);
+-
+-/* Test if a network device is a candev based device
+- * and return the can_priv* if so.
+- */
+-struct can_priv *safe_candev_priv(struct net_device *dev)
+-{
+- if (dev->type != ARPHRD_CAN || dev->rtnl_link_ops != &can_link_ops)
+- return NULL;
+-
+- return netdev_priv(dev);
+-}
+-EXPORT_SYMBOL_GPL(safe_candev_priv);
+-
+-static __init int can_dev_init(void)
+-{
+- int err;
+-
+- can_led_notifier_init();
+-
+- err = rtnl_link_register(&can_link_ops);
+- if (!err)
+- pr_info(MOD_DESC "\n");
+-
+- return err;
+-}
+-module_init(can_dev_init);
+-
+-static __exit void can_dev_exit(void)
+-{
+- rtnl_link_unregister(&can_link_ops);
+-
+- can_led_notifier_exit();
+-}
+-module_exit(can_dev_exit);
+-
+-MODULE_ALIAS_RTNL_LINK("can");
+diff --git a/drivers/net/can/dev/Makefile b/drivers/net/can/dev/Makefile
+new file mode 100644
+index 0000000000000..cba92e6bcf6f5
+--- /dev/null
++++ b/drivers/net/can/dev/Makefile
+@@ -0,0 +1,7 @@
++# SPDX-License-Identifier: GPL-2.0
++
++obj-$(CONFIG_CAN_DEV) += can-dev.o
++can-dev-y += dev.o
++can-dev-y += rx-offload.o
++
++can-dev-$(CONFIG_CAN_LEDS) += led.o
+diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
+new file mode 100644
+index 0000000000000..a665afaeccd12
+--- /dev/null
++++ b/drivers/net/can/dev/dev.c
+@@ -0,0 +1,1341 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
++ * Copyright (C) 2006 Andrey Volkov, Varma Electronics
++ * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/slab.h>
++#include <linux/netdevice.h>
++#include <linux/if_arp.h>
++#include <linux/workqueue.h>
++#include <linux/can.h>
++#include <linux/can/can-ml.h>
++#include <linux/can/dev.h>
++#include <linux/can/skb.h>
++#include <linux/can/netlink.h>
++#include <linux/can/led.h>
++#include <linux/of.h>
++#include <net/rtnetlink.h>
++
++#define MOD_DESC "CAN device driver interface"
++
++MODULE_DESCRIPTION(MOD_DESC);
++MODULE_LICENSE("GPL v2");
++MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
++
++/* CAN DLC to real data length conversion helpers */
++
++static const u8 dlc2len[] = {0, 1, 2, 3, 4, 5, 6, 7,
++ 8, 12, 16, 20, 24, 32, 48, 64};
++
++/* get data length from raw data length code (DLC) */
++u8 can_fd_dlc2len(u8 dlc)
++{
++ return dlc2len[dlc & 0x0F];
++}
++EXPORT_SYMBOL_GPL(can_fd_dlc2len);
++
++static const u8 len2dlc[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, /* 0 - 8 */
++ 9, 9, 9, 9, /* 9 - 12 */
++ 10, 10, 10, 10, /* 13 - 16 */
++ 11, 11, 11, 11, /* 17 - 20 */
++ 12, 12, 12, 12, /* 21 - 24 */
++ 13, 13, 13, 13, 13, 13, 13, 13, /* 25 - 32 */
++ 14, 14, 14, 14, 14, 14, 14, 14, /* 33 - 40 */
++ 14, 14, 14, 14, 14, 14, 14, 14, /* 41 - 48 */
++ 15, 15, 15, 15, 15, 15, 15, 15, /* 49 - 56 */
++ 15, 15, 15, 15, 15, 15, 15, 15}; /* 57 - 64 */
++
++/* map the sanitized data length to an appropriate data length code */
++u8 can_fd_len2dlc(u8 len)
++{
++ if (unlikely(len > 64))
++ return 0xF;
++
++ return len2dlc[len];
++}
++EXPORT_SYMBOL_GPL(can_fd_len2dlc);
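++
++/* Example (informal sketch): the two helpers round any valid payload
++ * length up to the next standardized CAN FD frame size, e.g.:
++ *
++ *	can_fd_len2dlc(17);			// -> DLC 11
++ *	can_fd_dlc2len(can_fd_len2dlc(17));	// -> 20 byte frame length
++ */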
++
++#ifdef CONFIG_CAN_CALC_BITTIMING
++#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
++
++/* Bit-timing calculation derived from:
++ *
++ * Code based on LinCAN sources and H8S2638 project
++ * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz
++ * Copyright 2005 Stanislav Marek
++ * email: pisa@cmp.felk.cvut.cz
++ *
++ * Calculates proper bit-timing parameters for a specified bit-rate
++ * and sample-point, which can then be used to set the bit-timing
++ * registers of the CAN controller. You can find more information
++ * in the header file linux/can/netlink.h.
++ */
++static int
++can_update_sample_point(const struct can_bittiming_const *btc,
++ unsigned int sample_point_nominal, unsigned int tseg,
++ unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
++ unsigned int *sample_point_error_ptr)
++{
++ unsigned int sample_point_error, best_sample_point_error = UINT_MAX;
++ unsigned int sample_point, best_sample_point = 0;
++ unsigned int tseg1, tseg2;
++ int i;
++
++ for (i = 0; i <= 1; i++) {
++ tseg2 = tseg + CAN_SYNC_SEG -
++ (sample_point_nominal * (tseg + CAN_SYNC_SEG)) /
++ 1000 - i;
++ tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
++ tseg1 = tseg - tseg2;
++ if (tseg1 > btc->tseg1_max) {
++ tseg1 = btc->tseg1_max;
++ tseg2 = tseg - tseg1;
++ }
++
++ sample_point = 1000 * (tseg + CAN_SYNC_SEG - tseg2) /
++ (tseg + CAN_SYNC_SEG);
++ sample_point_error = abs(sample_point_nominal - sample_point);
++
++ if (sample_point <= sample_point_nominal &&
++ sample_point_error < best_sample_point_error) {
++ best_sample_point = sample_point;
++ best_sample_point_error = sample_point_error;
++ *tseg1_ptr = tseg1;
++ *tseg2_ptr = tseg2;
++ }
++ }
++
++ if (sample_point_error_ptr)
++ *sample_point_error_ptr = best_sample_point_error;
++
++ return best_sample_point;
++}
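++
++/* Worked example (informal): for tseg = 15 and a nominal sample point of
++ * 87.5% (875 per mille), the i = 0 iteration gives
++ *
++ *	tseg2 = 16 - (875 * 16) / 1000 = 2, tseg1 = 13,
++ *	sample_point = 1000 * (16 - 2) / 16 = 875
++ *
++ * i.e. an exact match with zero sample point error, clamping permitting.
++ */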
++
++static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
++ const struct can_bittiming_const *btc)
++{
++ struct can_priv *priv = netdev_priv(dev);
++ unsigned int bitrate; /* current bitrate */
++ unsigned int bitrate_error; /* difference between current and nominal value */
++ unsigned int best_bitrate_error = UINT_MAX;
++ unsigned int sample_point_error; /* difference between current and nominal value */
++ unsigned int best_sample_point_error = UINT_MAX;
++ unsigned int sample_point_nominal; /* nominal sample point */
++ unsigned int best_tseg = 0; /* current best value for tseg */
++ unsigned int best_brp = 0; /* current best value for brp */
++ unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
++ u64 v64;
++
++ /* Use CiA recommended sample points */
++ if (bt->sample_point) {
++ sample_point_nominal = bt->sample_point;
++ } else {
++ if (bt->bitrate > 800000)
++ sample_point_nominal = 750;
++ else if (bt->bitrate > 500000)
++ sample_point_nominal = 800;
++ else
++ sample_point_nominal = 875;
++ }
++
++ /* tseg even = round down, odd = round up */
++ for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
++ tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
++ tsegall = CAN_SYNC_SEG + tseg / 2;
++
++ /* Compute all possible tseg choices (tseg=tseg1+tseg2) */
++ brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
++
++ /* choose brp step which is possible in system */
++ brp = (brp / btc->brp_inc) * btc->brp_inc;
++ if (brp < btc->brp_min || brp > btc->brp_max)
++ continue;
++
++ bitrate = priv->clock.freq / (brp * tsegall);
++ bitrate_error = abs(bt->bitrate - bitrate);
++
++ /* tseg brp biterror */
++ if (bitrate_error > best_bitrate_error)
++ continue;
++
++ /* reset sample point error if we have a better bitrate */
++ if (bitrate_error < best_bitrate_error)
++ best_sample_point_error = UINT_MAX;
++
++ can_update_sample_point(btc, sample_point_nominal, tseg / 2,
++ &tseg1, &tseg2, &sample_point_error);
++ if (sample_point_error > best_sample_point_error)
++ continue;
++
++ best_sample_point_error = sample_point_error;
++ best_bitrate_error = bitrate_error;
++ best_tseg = tseg / 2;
++ best_brp = brp;
++
++ if (bitrate_error == 0 && sample_point_error == 0)
++ break;
++ }
++
++ if (best_bitrate_error) {
++ /* Error in one-tenth of a percent */
++ v64 = (u64)best_bitrate_error * 1000;
++ do_div(v64, bt->bitrate);
++ bitrate_error = (u32)v64;
++ if (bitrate_error > CAN_CALC_MAX_ERROR) {
++ netdev_err(dev,
++ "bitrate error %d.%d%% too high\n",
++ bitrate_error / 10, bitrate_error % 10);
++ return -EDOM;
++ }
++ netdev_warn(dev, "bitrate error %d.%d%%\n",
++ bitrate_error / 10, bitrate_error % 10);
++ }
++
++ /* real sample point */
++ bt->sample_point = can_update_sample_point(btc, sample_point_nominal,
++ best_tseg, &tseg1, &tseg2,
++ NULL);
++
++ v64 = (u64)best_brp * 1000 * 1000 * 1000;
++ do_div(v64, priv->clock.freq);
++ bt->tq = (u32)v64;
++ bt->prop_seg = tseg1 / 2;
++ bt->phase_seg1 = tseg1 - bt->prop_seg;
++ bt->phase_seg2 = tseg2;
++
++ /* check for sjw user settings */
++ if (!bt->sjw || !btc->sjw_max) {
++ bt->sjw = 1;
++ } else {
++ /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */
++ if (bt->sjw > btc->sjw_max)
++ bt->sjw = btc->sjw_max;
++ /* bt->sjw must not be higher than tseg2 */
++ if (tseg2 < bt->sjw)
++ bt->sjw = tseg2;
++ }
++
++ bt->brp = best_brp;
++
++ /* real bitrate */
++ bt->bitrate = priv->clock.freq /
++ (bt->brp * (CAN_SYNC_SEG + tseg1 + tseg2));
++
++ return 0;
++}
++#else /* !CONFIG_CAN_CALC_BITTIMING */
++static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
++ const struct can_bittiming_const *btc)
++{
++ netdev_err(dev, "bit-timing calculation not available\n");
++ return -EINVAL;
++}
++#endif /* CONFIG_CAN_CALC_BITTIMING */
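++
++/* Worked example (informal): with an 8 MHz clock, brp = 1, tseg1 = 13 and
++ * tseg2 = 2, the closing formula above yields
++ *
++ *	bitrate = 8000000 / (1 * (CAN_SYNC_SEG + 13 + 2)) = 500000 bit/s
++ *
++ * at the 87.5% sample point computed in can_update_sample_point().
++ */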
++
++/* Checks the validity of the specified bit-timing parameters prop_seg,
++ * phase_seg1, phase_seg2 and sjw and tries to determine the bitrate
++ * prescaler value brp. You can find more information in the header
++ * file linux/can/netlink.h.
++ */
++static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
++ const struct can_bittiming_const *btc)
++{
++ struct can_priv *priv = netdev_priv(dev);
++ int tseg1, alltseg;
++ u64 brp64;
++
++ tseg1 = bt->prop_seg + bt->phase_seg1;
++ if (!bt->sjw)
++ bt->sjw = 1;
++ if (bt->sjw > btc->sjw_max ||
++ tseg1 < btc->tseg1_min || tseg1 > btc->tseg1_max ||
++ bt->phase_seg2 < btc->tseg2_min || bt->phase_seg2 > btc->tseg2_max)
++ return -ERANGE;
++
++ brp64 = (u64)priv->clock.freq * (u64)bt->tq;
++ if (btc->brp_inc > 1)
++ do_div(brp64, btc->brp_inc);
++ brp64 += 500000000UL - 1;
++ do_div(brp64, 1000000000UL); /* the practicable BRP */
++ if (btc->brp_inc > 1)
++ brp64 *= btc->brp_inc;
++ bt->brp = (u32)brp64;
++
++ if (bt->brp < btc->brp_min || bt->brp > btc->brp_max)
++ return -EINVAL;
++
++ alltseg = bt->prop_seg + bt->phase_seg1 + bt->phase_seg2 + 1;
++ bt->bitrate = priv->clock.freq / (bt->brp * alltseg);
++ bt->sample_point = ((tseg1 + 1) * 1000) / alltseg;
++
++ return 0;
++}
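++
++/* Worked example (informal): tq is given in nanoseconds, so an 8 MHz clock
++ * with tq = 125 rounds to the prescaler
++ *
++ *	brp = (8000000 * 125 + 500000000 - 1) / 1000000000 = 1
++ *
++ * and prop_seg + phase_seg1 + phase_seg2 + 1 then fixes the bitrate and
++ * sample point as computed above.
++ */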
++
++/* Checks the validity of predefined bitrate settings */
++static int
++can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt,
++ const u32 *bitrate_const,
++ const unsigned int bitrate_const_cnt)
++{
++ struct can_priv *priv = netdev_priv(dev);
++ unsigned int i;
++
++ for (i = 0; i < bitrate_const_cnt; i++) {
++ if (bt->bitrate == bitrate_const[i])
++ break;
++ }
++
++	if (i >= bitrate_const_cnt)
++ return -EINVAL;
++
++ return 0;
++}
++
++static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
++ const struct can_bittiming_const *btc,
++ const u32 *bitrate_const,
++ const unsigned int bitrate_const_cnt)
++{
++ int err;
++
++	/* Depending on the given can_bittiming parameter structure, the CAN
++	 * timing parameters are either calculated based on the provided
++	 * bitrate OR the CAN timing parameters (tq, prop_seg, etc.) are
++	 * provided directly and are then checked and fixed up.
++ */
++ if (!bt->tq && bt->bitrate && btc)
++ err = can_calc_bittiming(dev, bt, btc);
++ else if (bt->tq && !bt->bitrate && btc)
++ err = can_fixup_bittiming(dev, bt, btc);
++ else if (!bt->tq && bt->bitrate && bitrate_const)
++ err = can_validate_bitrate(dev, bt, bitrate_const,
++ bitrate_const_cnt);
++ else
++ err = -EINVAL;
++
++ return err;
++}
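++
++/* Parameter selection (informal summary of the branches above):
++ *
++ *	bitrate set, tq unset, btc given         -> can_calc_bittiming()
++ *	tq set, bitrate unset, btc given         -> can_fixup_bittiming()
++ *	bitrate set, tq unset, bitrate_const set -> can_validate_bitrate()
++ *	anything else                            -> -EINVAL
++ */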
++
++static void can_update_state_error_stats(struct net_device *dev,
++ enum can_state new_state)
++{
++ struct can_priv *priv = netdev_priv(dev);
++
++ if (new_state <= priv->state)
++ return;
++
++ switch (new_state) {
++ case CAN_STATE_ERROR_WARNING:
++ priv->can_stats.error_warning++;
++ break;
++ case CAN_STATE_ERROR_PASSIVE:
++ priv->can_stats.error_passive++;
++ break;
++ case CAN_STATE_BUS_OFF:
++ priv->can_stats.bus_off++;
++ break;
++ default:
++ break;
++ }
++}
++
++static int can_tx_state_to_frame(struct net_device *dev, enum can_state state)
++{
++ switch (state) {
++ case CAN_STATE_ERROR_ACTIVE:
++ return CAN_ERR_CRTL_ACTIVE;
++ case CAN_STATE_ERROR_WARNING:
++ return CAN_ERR_CRTL_TX_WARNING;
++ case CAN_STATE_ERROR_PASSIVE:
++ return CAN_ERR_CRTL_TX_PASSIVE;
++ default:
++ return 0;
++ }
++}
++
++static int can_rx_state_to_frame(struct net_device *dev, enum can_state state)
++{
++ switch (state) {
++ case CAN_STATE_ERROR_ACTIVE:
++ return CAN_ERR_CRTL_ACTIVE;
++ case CAN_STATE_ERROR_WARNING:
++ return CAN_ERR_CRTL_RX_WARNING;
++ case CAN_STATE_ERROR_PASSIVE:
++ return CAN_ERR_CRTL_RX_PASSIVE;
++ default:
++ return 0;
++ }
++}
++
++static const char *can_get_state_str(const enum can_state state)
++{
++ switch (state) {
++ case CAN_STATE_ERROR_ACTIVE:
++ return "Error Active";
++ case CAN_STATE_ERROR_WARNING:
++ return "Error Warning";
++ case CAN_STATE_ERROR_PASSIVE:
++ return "Error Passive";
++ case CAN_STATE_BUS_OFF:
++ return "Bus Off";
++ case CAN_STATE_STOPPED:
++ return "Stopped";
++ case CAN_STATE_SLEEPING:
++ return "Sleeping";
++ default:
++ return "<unknown>";
++ }
++}
++
++void can_change_state(struct net_device *dev, struct can_frame *cf,
++ enum can_state tx_state, enum can_state rx_state)
++{
++ struct can_priv *priv = netdev_priv(dev);
++ enum can_state new_state = max(tx_state, rx_state);
++
++ if (unlikely(new_state == priv->state)) {
++ netdev_warn(dev, "%s: oops, state did not change", __func__);
++ return;
++ }
++
++ netdev_dbg(dev, "Controller changed from %s State (%d) into %s State (%d).\n",
++ can_get_state_str(priv->state), priv->state,
++ can_get_state_str(new_state), new_state);
++
++ can_update_state_error_stats(dev, new_state);
++ priv->state = new_state;
++
++ if (!cf)
++ return;
++
++ if (unlikely(new_state == CAN_STATE_BUS_OFF)) {
++ cf->can_id |= CAN_ERR_BUSOFF;
++ return;
++ }
++
++ cf->can_id |= CAN_ERR_CRTL;
++ cf->data[1] |= tx_state >= rx_state ?
++ can_tx_state_to_frame(dev, tx_state) : 0;
++ cf->data[1] |= tx_state <= rx_state ?
++ can_rx_state_to_frame(dev, rx_state) : 0;
++}
++EXPORT_SYMBOL_GPL(can_change_state);
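++
++/* Usage sketch (hypothetical driver code, not taken from a real driver):
++ * an error interrupt handler typically derives the tx/rx states from the
++ * error counters and lets can_change_state() update the statistics and
++ * fill the error frame:
++ *
++ *	struct can_frame *cf;
++ *	struct sk_buff *skb = alloc_can_err_skb(dev, &cf);
++ *	enum can_state tx_state, rx_state;
++ *
++ *	tx_state = txerr >= 128 ? CAN_STATE_ERROR_PASSIVE :
++ *		   txerr >= 96 ? CAN_STATE_ERROR_WARNING :
++ *		   CAN_STATE_ERROR_ACTIVE;
++ *	(rx_state is derived from rxerr in the same way)
++ *	can_change_state(dev, skb ? cf : NULL, tx_state, rx_state);
++ */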
++
++/* Local echo of CAN messages
++ *
++ * CAN network devices *should* support a local echo functionality
++ * (see Documentation/networking/can.rst). To test the handling of CAN
++ * interfaces that do not support the local echo, both driver types are
++ * implemented. If a driver does not support the echo, IFF_ECHO remains
++ * clear in dev->flags, which causes the PF_CAN core to perform the echo
++ * as a fallback solution.
++ */
++static void can_flush_echo_skb(struct net_device *dev)
++{
++ struct can_priv *priv = netdev_priv(dev);
++ struct net_device_stats *stats = &dev->stats;
++ int i;
++
++ for (i = 0; i < priv->echo_skb_max; i++) {
++ if (priv->echo_skb[i]) {
++ kfree_skb(priv->echo_skb[i]);
++ priv->echo_skb[i] = NULL;
++ stats->tx_dropped++;
++ stats->tx_aborted_errors++;
++ }
++ }
++}
++
++/* Put the skb on the stack to be looped back locally later on
++ *
++ * The function is typically called in the start_xmit function
++ * of the device driver. The driver must protect access to
++ * priv->echo_skb, if necessary.
++ */
++int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
++ unsigned int idx)
++{
++ struct can_priv *priv = netdev_priv(dev);
++
++ BUG_ON(idx >= priv->echo_skb_max);
++
++ /* check flag whether this packet has to be looped back */
++ if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK ||
++ (skb->protocol != htons(ETH_P_CAN) &&
++ skb->protocol != htons(ETH_P_CANFD))) {
++ kfree_skb(skb);
++ return 0;
++ }
++
++ if (!priv->echo_skb[idx]) {
++ skb = can_create_echo_skb(skb);
++ if (!skb)
++ return -ENOMEM;
++
++ /* make settings for echo to reduce code in irq context */
++ skb->pkt_type = PACKET_BROADCAST;
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++ skb->dev = dev;
++
++ /* save this skb for tx interrupt echo handling */
++ priv->echo_skb[idx] = skb;
++ } else {
++ /* locking problem with netif_stop_queue() ?? */
++ netdev_err(dev, "%s: BUG! echo_skb %d is occupied!\n", __func__, idx);
++ kfree_skb(skb);
++ return -EBUSY;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(can_put_echo_skb);
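++
++/* Usage sketch (hypothetical driver code): in ndo_start_xmit() the skb is
++ * parked for the local echo before the frame is written to the hardware;
++ * my_hw_write_frame() and mbox_idx are placeholders:
++ *
++ *	can_put_echo_skb(skb, dev, mbox_idx);
++ *	my_hw_write_frame(priv, cf, mbox_idx);
++ *	return NETDEV_TX_OK;
++ */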
++
++struct sk_buff *
++__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
++{
++ struct can_priv *priv = netdev_priv(dev);
++
++ if (idx >= priv->echo_skb_max) {
++ netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
++ __func__, idx, priv->echo_skb_max);
++ return NULL;
++ }
++
++ if (priv->echo_skb[idx]) {
++ /* Using "struct canfd_frame::len" for the frame
++ * length is supported on both CAN and CANFD frames.
++ */
++ struct sk_buff *skb = priv->echo_skb[idx];
++ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
++
++ /* get the real payload length for netdev statistics */
++ if (cf->can_id & CAN_RTR_FLAG)
++ *len_ptr = 0;
++ else
++ *len_ptr = cf->len;
++
++ priv->echo_skb[idx] = NULL;
++
++ return skb;
++ }
++
++ return NULL;
++}
++
++/* Get the skb from the stack and loop it back locally
++ *
++ * The function is typically called when the TX done interrupt
++ * is handled in the device driver. The driver must protect
++ * access to priv->echo_skb, if necessary.
++ */
++unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
++{
++ struct sk_buff *skb;
++ u8 len;
++
++ skb = __can_get_echo_skb(dev, idx, &len);
++ if (!skb)
++ return 0;
++
++ skb_get(skb);
++ if (netif_rx(skb) == NET_RX_SUCCESS)
++ dev_consume_skb_any(skb);
++ else
++ dev_kfree_skb_any(skb);
++
++ return len;
++}
++EXPORT_SYMBOL_GPL(can_get_echo_skb);
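++
++/* Usage sketch (hypothetical driver code): the TX complete interrupt loops
++ * the parked frame back and accounts it with the returned payload length:
++ *
++ *	stats->tx_bytes += can_get_echo_skb(dev, mbox_idx);
++ *	stats->tx_packets++;
++ *	netif_wake_queue(dev);
++ */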
++
++/* Remove the skb from the stack and free it.
++ *
++ * The function is typically called when TX failed.
++ */
++void can_free_echo_skb(struct net_device *dev, unsigned int idx)
++{
++ struct can_priv *priv = netdev_priv(dev);
++
++ BUG_ON(idx >= priv->echo_skb_max);
++
++ if (priv->echo_skb[idx]) {
++ dev_kfree_skb_any(priv->echo_skb[idx]);
++ priv->echo_skb[idx] = NULL;
++ }
++}
++EXPORT_SYMBOL_GPL(can_free_echo_skb);
++
++/* CAN device restart for bus-off recovery */
++static void can_restart(struct net_device *dev)
++{
++ struct can_priv *priv = netdev_priv(dev);
++ struct net_device_stats *stats = &dev->stats;
++ struct sk_buff *skb;
++ struct can_frame *cf;
++ int err;
++
++ BUG_ON(netif_carrier_ok(dev));
++
++ /* No synchronization needed because the device is bus-off and
++ * no messages can come in or go out.
++ */
++ can_flush_echo_skb(dev);
++
++ /* send restart message upstream */
++ skb = alloc_can_err_skb(dev, &cf);
++ if (!skb)
++ goto restart;
++
++ cf->can_id |= CAN_ERR_RESTARTED;
++
++ stats->rx_packets++;
++ stats->rx_bytes += cf->len;
++
++ netif_rx_ni(skb);
++
++restart:
++ netdev_dbg(dev, "restarted\n");
++ priv->can_stats.restarts++;
++
++ /* Now restart the device */
++ err = priv->do_set_mode(dev, CAN_MODE_START);
++
++ netif_carrier_on(dev);
++ if (err)
++ netdev_err(dev, "Error %d during restart", err);
++}
++
++static void can_restart_work(struct work_struct *work)
++{
++ struct delayed_work *dwork = to_delayed_work(work);
++ struct can_priv *priv = container_of(dwork, struct can_priv,
++ restart_work);
++
++ can_restart(priv->dev);
++}
++
++int can_restart_now(struct net_device *dev)
++{
++ struct can_priv *priv = netdev_priv(dev);
++
++ /* A manual restart is only permitted if automatic restart is
++ * disabled and the device is in the bus-off state
++ */
++ if (priv->restart_ms)
++ return -EINVAL;
++ if (priv->state != CAN_STATE_BUS_OFF)
++ return -EBUSY;
++
++ cancel_delayed_work_sync(&priv->restart_work);
++ can_restart(dev);
++
++ return 0;
++}
++
++/* CAN bus-off
++ *
++ * This function should be called when the device goes bus-off to
++ * tell the netif layer that no more packets can be sent or received.
++ * If enabled, a timer is started to trigger bus-off recovery.
++ */
++void can_bus_off(struct net_device *dev)
++{
++ struct can_priv *priv = netdev_priv(dev);
++
++ if (priv->restart_ms)
++ netdev_info(dev, "bus-off, scheduling restart in %d ms\n",
++ priv->restart_ms);
++ else
++ netdev_info(dev, "bus-off\n");
++
++ netif_carrier_off(dev);
++
++ if (priv->restart_ms)
++ schedule_delayed_work(&priv->restart_work,
++ msecs_to_jiffies(priv->restart_ms));
++}
++EXPORT_SYMBOL_GPL(can_bus_off);
++
++static void can_setup(struct net_device *dev)
++{
++ dev->type = ARPHRD_CAN;
++ dev->mtu = CAN_MTU;
++ dev->hard_header_len = 0;
++ dev->addr_len = 0;
++ dev->tx_queue_len = 10;
++
++ /* New-style flags. */
++ dev->flags = IFF_NOARP;
++ dev->features = NETIF_F_HW_CSUM;
++}
++
++struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
++{
++ struct sk_buff *skb;
++
++ skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
++ sizeof(struct can_frame));
++ if (unlikely(!skb))
++ return NULL;
++
++ skb->protocol = htons(ETH_P_CAN);
++ skb->pkt_type = PACKET_BROADCAST;
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++
++ skb_reset_mac_header(skb);
++ skb_reset_network_header(skb);
++ skb_reset_transport_header(skb);
++
++ can_skb_reserve(skb);
++ can_skb_prv(skb)->ifindex = dev->ifindex;
++ can_skb_prv(skb)->skbcnt = 0;
++
++ *cf = skb_put_zero(skb, sizeof(struct can_frame));
++
++ return skb;
++}
++EXPORT_SYMBOL_GPL(alloc_can_skb);
++
++struct sk_buff *alloc_canfd_skb(struct net_device *dev,
++ struct canfd_frame **cfd)
++{
++ struct sk_buff *skb;
++
++ skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
++ sizeof(struct canfd_frame));
++ if (unlikely(!skb))
++ return NULL;
++
++ skb->protocol = htons(ETH_P_CANFD);
++ skb->pkt_type = PACKET_BROADCAST;
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++
++ skb_reset_mac_header(skb);
++ skb_reset_network_header(skb);
++ skb_reset_transport_header(skb);
++
++ can_skb_reserve(skb);
++ can_skb_prv(skb)->ifindex = dev->ifindex;
++ can_skb_prv(skb)->skbcnt = 0;
++
++ *cfd = skb_put_zero(skb, sizeof(struct canfd_frame));
++
++ return skb;
++}
++EXPORT_SYMBOL_GPL(alloc_canfd_skb);
++
++struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
++{
++ struct sk_buff *skb;
++
++ skb = alloc_can_skb(dev, cf);
++ if (unlikely(!skb))
++ return NULL;
++
++ (*cf)->can_id = CAN_ERR_FLAG;
++ (*cf)->len = CAN_ERR_DLC;
++
++ return skb;
++}
++EXPORT_SYMBOL_GPL(alloc_can_err_skb);
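++
++/* Usage sketch (hypothetical driver code): a receive handler allocates an
++ * skb, fills the frame from the hardware (my_hw_read_*() are placeholders)
++ * and hands it to the stack:
++ *
++ *	struct can_frame *cf;
++ *	struct sk_buff *skb = alloc_can_skb(dev, &cf);
++ *
++ *	if (skb) {
++ *		cf->can_id = my_hw_read_id(priv);
++ *		cf->len = my_hw_read_len(priv);
++ *		my_hw_read_data(priv, cf->data);
++ *		netif_rx(skb);
++ *	}
++ */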
++
++/* Allocate and setup space for the CAN network device */
++struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
++ unsigned int txqs, unsigned int rxqs)
++{
++ struct can_ml_priv *can_ml;
++ struct net_device *dev;
++ struct can_priv *priv;
++ int size;
++
++ /* We put the driver's priv, the CAN mid layer priv and the
++ * echo skb into the netdevice's priv. The memory layout for
++ * the netdev_priv is like this:
++ *
++ * +-------------------------+
++ * | driver's priv |
++ * +-------------------------+
++ * | struct can_ml_priv |
++ * +-------------------------+
++ * | array of struct sk_buff |
++ * +-------------------------+
++ */
++
++ size = ALIGN(sizeof_priv, NETDEV_ALIGN) + sizeof(struct can_ml_priv);
++
++ if (echo_skb_max)
++ size = ALIGN(size, sizeof(struct sk_buff *)) +
++ echo_skb_max * sizeof(struct sk_buff *);
++
++ dev = alloc_netdev_mqs(size, "can%d", NET_NAME_UNKNOWN, can_setup,
++ txqs, rxqs);
++ if (!dev)
++ return NULL;
++
++ priv = netdev_priv(dev);
++ priv->dev = dev;
++
++ can_ml = (void *)priv + ALIGN(sizeof_priv, NETDEV_ALIGN);
++ can_set_ml_priv(dev, can_ml);
++
++ if (echo_skb_max) {
++ priv->echo_skb_max = echo_skb_max;
++ priv->echo_skb = (void *)priv +
++ (size - echo_skb_max * sizeof(struct sk_buff *));
++ }
++
++ priv->state = CAN_STATE_STOPPED;
++
++ INIT_DELAYED_WORK(&priv->restart_work, can_restart_work);
++
++ return dev;
++}
++EXPORT_SYMBOL_GPL(alloc_candev_mqs);
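++
++/* Layout example (informal): with sizeof_priv = 100, NETDEV_ALIGN = 32 and
++ * echo_skb_max = 4 on a 64 bit machine, the driver priv is padded to 128
++ * bytes, struct can_ml_priv starts at offset 128, and the four echo skb
++ * pointers occupy the last 4 * 8 bytes of the allocation, matching the
++ * can_ml and priv->echo_skb pointers derived above.
++ */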
++
++/* Free space of the CAN network device */
++void free_candev(struct net_device *dev)
++{
++ free_netdev(dev);
++}
++EXPORT_SYMBOL_GPL(free_candev);
++
++/* changing MTU and control mode for CAN/CANFD devices */
++int can_change_mtu(struct net_device *dev, int new_mtu)
++{
++ struct can_priv *priv = netdev_priv(dev);
++
++ /* Do not allow changing the MTU while running */
++ if (dev->flags & IFF_UP)
++ return -EBUSY;
++
++ /* allow change of MTU according to the CANFD ability of the device */
++ switch (new_mtu) {
++ case CAN_MTU:
++ /* 'CANFD-only' controllers can not switch to CAN_MTU */
++ if (priv->ctrlmode_static & CAN_CTRLMODE_FD)
++ return -EINVAL;
++
++ priv->ctrlmode &= ~CAN_CTRLMODE_FD;
++ break;
++
++ case CANFD_MTU:
++ /* check for potential CANFD ability */
++ if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) &&
++ !(priv->ctrlmode_static & CAN_CTRLMODE_FD))
++ return -EINVAL;
++
++ priv->ctrlmode |= CAN_CTRLMODE_FD;
++ break;
++
++ default:
++ return -EINVAL;
++ }
++
++ dev->mtu = new_mtu;
++ return 0;
++}
++EXPORT_SYMBOL_GPL(can_change_mtu);
++
++/* Common open function when the device gets opened.
++ *
++ * This function should be called in the open function of the device
++ * driver.
++ */
++int open_candev(struct net_device *dev)
++{
++ struct can_priv *priv = netdev_priv(dev);
++
++ if (!priv->bittiming.bitrate) {
++ netdev_err(dev, "bit-timing not yet defined\n");
++ return -EINVAL;
++ }
++
++ /* For CAN FD the data bitrate has to be >= the arbitration bitrate */
++ if ((priv->ctrlmode & CAN_CTRLMODE_FD) &&
++ (!priv->data_bittiming.bitrate ||
++ priv->data_bittiming.bitrate < priv->bittiming.bitrate)) {
++ netdev_err(dev, "incorrect/missing data bit-timing\n");
++ return -EINVAL;
++ }
++
++ /* Switch carrier on if device was stopped while in bus-off state */
++ if (!netif_carrier_ok(dev))
++ netif_carrier_on(dev);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(open_candev);
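++
++/* Usage sketch (hypothetical driver code): a driver's ndo_open runs the
++ * common checks first and only then powers up its controller;
++ * my_hw_start() is a placeholder:
++ *
++ *	err = open_candev(dev);
++ *	if (err)
++ *		return err;
++ *	my_hw_start(priv);
++ *	netif_start_queue(dev);
++ *	return 0;
++ */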
++
++#ifdef CONFIG_OF
++/* Common function that can be used to determine the limitations of
++ * a transceiver when it provides no means to report them at runtime.
++ */
++void of_can_transceiver(struct net_device *dev)
++{
++ struct device_node *dn;
++ struct can_priv *priv = netdev_priv(dev);
++ struct device_node *np = dev->dev.parent->of_node;
++ int ret;
++
++ dn = of_get_child_by_name(np, "can-transceiver");
++ if (!dn)
++ return;
++
++ ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max);
++ of_node_put(dn);
++ if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max))
++ netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n");
++}
++EXPORT_SYMBOL_GPL(of_can_transceiver);
++#endif
++
++/* Common close function for cleanup before the device gets closed.
++ *
++ * This function should be called in the close function of the device
++ * driver.
++ */
++void close_candev(struct net_device *dev)
++{
++ struct can_priv *priv = netdev_priv(dev);
++
++ cancel_delayed_work_sync(&priv->restart_work);
++ can_flush_echo_skb(dev);
++}
++EXPORT_SYMBOL_GPL(close_candev);
++
++/* CAN netlink interface */
++static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
++ [IFLA_CAN_STATE] = { .type = NLA_U32 },
++ [IFLA_CAN_CTRLMODE] = { .len = sizeof(struct can_ctrlmode) },
++ [IFLA_CAN_RESTART_MS] = { .type = NLA_U32 },
++ [IFLA_CAN_RESTART] = { .type = NLA_U32 },
++ [IFLA_CAN_BITTIMING] = { .len = sizeof(struct can_bittiming) },
++ [IFLA_CAN_BITTIMING_CONST]
++ = { .len = sizeof(struct can_bittiming_const) },
++ [IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) },
++ [IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) },
++ [IFLA_CAN_DATA_BITTIMING]
++ = { .len = sizeof(struct can_bittiming) },
++ [IFLA_CAN_DATA_BITTIMING_CONST]
++ = { .len = sizeof(struct can_bittiming_const) },
++ [IFLA_CAN_TERMINATION] = { .type = NLA_U16 },
++};
++
++static int can_validate(struct nlattr *tb[], struct nlattr *data[],
++ struct netlink_ext_ack *extack)
++{
++ bool is_can_fd = false;
++
++ /* Make sure that valid CAN FD configurations always consist of
++ * - nominal/arbitration bittiming
++ * - data bittiming
++ * - control mode with CAN_CTRLMODE_FD set
++ */
++
++ if (!data)
++ return 0;
++
++ if (data[IFLA_CAN_CTRLMODE]) {
++ struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
++
++ is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
++ }
++
++ if (is_can_fd) {
++ if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
++ return -EOPNOTSUPP;
++ }
++
++ if (data[IFLA_CAN_DATA_BITTIMING]) {
++ if (!is_can_fd || !data[IFLA_CAN_BITTIMING])
++ return -EOPNOTSUPP;
++ }
++
++ return 0;
++}
++
++static int can_changelink(struct net_device *dev, struct nlattr *tb[],
++ struct nlattr *data[],
++ struct netlink_ext_ack *extack)
++{
++ struct can_priv *priv = netdev_priv(dev);
++ int err;
++
++ /* We need synchronization with dev->stop() */
++ ASSERT_RTNL();
++
++ if (data[IFLA_CAN_BITTIMING]) {
++ struct can_bittiming bt;
++
++ /* Do not allow changing bittiming while running */
++ if (dev->flags & IFF_UP)
++ return -EBUSY;
++
++ /* Calculate bittiming parameters based on
++ * bittiming_const if set, otherwise pass bitrate
++		 * directly via do_set_bittiming(). Bail out if neither
++ * is given.
++ */
++ if (!priv->bittiming_const && !priv->do_set_bittiming)
++ return -EOPNOTSUPP;
++
++ memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
++ err = can_get_bittiming(dev, &bt,
++ priv->bittiming_const,
++ priv->bitrate_const,
++ priv->bitrate_const_cnt);
++ if (err)
++ return err;
++
++ if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) {
++ netdev_err(dev, "arbitration bitrate surpasses transceiver capabilities of %d bps\n",
++ priv->bitrate_max);
++ return -EINVAL;
++ }
++
++ memcpy(&priv->bittiming, &bt, sizeof(bt));
++
++ if (priv->do_set_bittiming) {
++ /* Finally, set the bit-timing registers */
++ err = priv->do_set_bittiming(dev);
++ if (err)
++ return err;
++ }
++ }
++
++ if (data[IFLA_CAN_CTRLMODE]) {
++ struct can_ctrlmode *cm;
++ u32 ctrlstatic;
++ u32 maskedflags;
++
++ /* Do not allow changing controller mode while running */
++ if (dev->flags & IFF_UP)
++ return -EBUSY;
++ cm = nla_data(data[IFLA_CAN_CTRLMODE]);
++ ctrlstatic = priv->ctrlmode_static;
++ maskedflags = cm->flags & cm->mask;
++
++ /* check whether provided bits are allowed to be passed */
++ if (cm->mask & ~(priv->ctrlmode_supported | ctrlstatic))
++ return -EOPNOTSUPP;
++
++ /* do not check for static fd-non-iso if 'fd' is disabled */
++ if (!(maskedflags & CAN_CTRLMODE_FD))
++ ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
++
++ /* make sure static options are provided by configuration */
++ if ((maskedflags & ctrlstatic) != ctrlstatic)
++ return -EOPNOTSUPP;
++
++ /* clear bits to be modified and copy the flag values */
++ priv->ctrlmode &= ~cm->mask;
++ priv->ctrlmode |= maskedflags;
++
++ /* CAN_CTRLMODE_FD can only be set when driver supports FD */
++ if (priv->ctrlmode & CAN_CTRLMODE_FD)
++ dev->mtu = CANFD_MTU;
++ else
++ dev->mtu = CAN_MTU;
++ }
++
++ if (data[IFLA_CAN_RESTART_MS]) {
++ /* Do not allow changing restart delay while running */
++ if (dev->flags & IFF_UP)
++ return -EBUSY;
++ priv->restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]);
++ }
++
++ if (data[IFLA_CAN_RESTART]) {
++ /* Do not allow a restart while not running */
++ if (!(dev->flags & IFF_UP))
++ return -EINVAL;
++ err = can_restart_now(dev);
++ if (err)
++ return err;
++ }
++
++ if (data[IFLA_CAN_DATA_BITTIMING]) {
++ struct can_bittiming dbt;
++
++ /* Do not allow changing bittiming while running */
++ if (dev->flags & IFF_UP)
++ return -EBUSY;
++
++ /* Calculate bittiming parameters based on
++ * data_bittiming_const if set, otherwise pass bitrate
++		 * directly via do_set_data_bittiming(). Bail out if neither
++ * is given.
++ */
++ if (!priv->data_bittiming_const && !priv->do_set_data_bittiming)
++ return -EOPNOTSUPP;
++
++ memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
++ sizeof(dbt));
++ err = can_get_bittiming(dev, &dbt,
++ priv->data_bittiming_const,
++ priv->data_bitrate_const,
++ priv->data_bitrate_const_cnt);
++ if (err)
++ return err;
++
++ if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
++ netdev_err(dev, "canfd data bitrate surpasses transceiver capabilities of %d bps\n",
++ priv->bitrate_max);
++ return -EINVAL;
++ }
++
++ memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
++
++ if (priv->do_set_data_bittiming) {
++ /* Finally, set the bit-timing registers */
++ err = priv->do_set_data_bittiming(dev);
++ if (err)
++ return err;
++ }
++ }
++
++ if (data[IFLA_CAN_TERMINATION]) {
++ const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]);
++ const unsigned int num_term = priv->termination_const_cnt;
++ unsigned int i;
++
++ if (!priv->do_set_termination)
++ return -EOPNOTSUPP;
++
++ /* check whether given value is supported by the interface */
++ for (i = 0; i < num_term; i++) {
++ if (termval == priv->termination_const[i])
++ break;
++ }
++ if (i >= num_term)
++ return -EINVAL;
++
++ /* Finally, set the termination value */
++ err = priv->do_set_termination(dev, termval);
++ if (err)
++ return err;
++
++ priv->termination = termval;
++ }
++
++ return 0;
++}
++
++static size_t can_get_size(const struct net_device *dev)
++{
++ struct can_priv *priv = netdev_priv(dev);
++ size_t size = 0;
++
++ if (priv->bittiming.bitrate) /* IFLA_CAN_BITTIMING */
++ size += nla_total_size(sizeof(struct can_bittiming));
++ if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
++ size += nla_total_size(sizeof(struct can_bittiming_const));
++ size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */
++ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */
++ size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */
++ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
++ if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
++ size += nla_total_size(sizeof(struct can_berr_counter));
++ if (priv->data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */
++ size += nla_total_size(sizeof(struct can_bittiming));
++ if (priv->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */
++ size += nla_total_size(sizeof(struct can_bittiming_const));
++ if (priv->termination_const) {
++ size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */
++ size += nla_total_size(sizeof(*priv->termination_const) * /* IFLA_CAN_TERMINATION_CONST */
++ priv->termination_const_cnt);
++ }
++ if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */
++ size += nla_total_size(sizeof(*priv->bitrate_const) *
++ priv->bitrate_const_cnt);
++ if (priv->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */
++ size += nla_total_size(sizeof(*priv->data_bitrate_const) *
++ priv->data_bitrate_const_cnt);
++	size += nla_total_size(sizeof(priv->bitrate_max));	/* IFLA_CAN_BITRATE_MAX */
++
++ return size;
++}
++
++static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
++{
++ struct can_priv *priv = netdev_priv(dev);
++ struct can_ctrlmode cm = {.flags = priv->ctrlmode};
++ struct can_berr_counter bec = { };
++ enum can_state state = priv->state;
++
++ if (priv->do_get_state)
++ priv->do_get_state(dev, &state);
++
++ if ((priv->bittiming.bitrate &&
++ nla_put(skb, IFLA_CAN_BITTIMING,
++ sizeof(priv->bittiming), &priv->bittiming)) ||
++
++ (priv->bittiming_const &&
++ nla_put(skb, IFLA_CAN_BITTIMING_CONST,
++ sizeof(*priv->bittiming_const), priv->bittiming_const)) ||
++
++ nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock) ||
++ nla_put_u32(skb, IFLA_CAN_STATE, state) ||
++ nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
++ nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
++
++ (priv->do_get_berr_counter &&
++ !priv->do_get_berr_counter(dev, &bec) &&
++ nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
++
++ (priv->data_bittiming.bitrate &&
++ nla_put(skb, IFLA_CAN_DATA_BITTIMING,
++ sizeof(priv->data_bittiming), &priv->data_bittiming)) ||
++
++ (priv->data_bittiming_const &&
++ nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
++ sizeof(*priv->data_bittiming_const),
++ priv->data_bittiming_const)) ||
++
++ (priv->termination_const &&
++ (nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) ||
++ nla_put(skb, IFLA_CAN_TERMINATION_CONST,
++ sizeof(*priv->termination_const) *
++ priv->termination_const_cnt,
++ priv->termination_const))) ||
++
++ (priv->bitrate_const &&
++ nla_put(skb, IFLA_CAN_BITRATE_CONST,
++ sizeof(*priv->bitrate_const) *
++ priv->bitrate_const_cnt,
++ priv->bitrate_const)) ||
++
++ (priv->data_bitrate_const &&
++ nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST,
++ sizeof(*priv->data_bitrate_const) *
++ priv->data_bitrate_const_cnt,
++ priv->data_bitrate_const)) ||
++
++ (nla_put(skb, IFLA_CAN_BITRATE_MAX,
++ sizeof(priv->bitrate_max),
++ &priv->bitrate_max))
++ )
++
++ return -EMSGSIZE;
++
++ return 0;
++}
++
++static size_t can_get_xstats_size(const struct net_device *dev)
++{
++ return sizeof(struct can_device_stats);
++}
++
++static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev)
++{
++ struct can_priv *priv = netdev_priv(dev);
++
++ if (nla_put(skb, IFLA_INFO_XSTATS,
++ sizeof(priv->can_stats), &priv->can_stats))
++ goto nla_put_failure;
++ return 0;
++
++nla_put_failure:
++ return -EMSGSIZE;
++}
++
++static int can_newlink(struct net *src_net, struct net_device *dev,
++ struct nlattr *tb[], struct nlattr *data[],
++ struct netlink_ext_ack *extack)
++{
++ return -EOPNOTSUPP;
++}
++
++static void can_dellink(struct net_device *dev, struct list_head *head)
++{
++}
++
++static struct rtnl_link_ops can_link_ops __read_mostly = {
++ .kind = "can",
++ .netns_refund = true,
++ .maxtype = IFLA_CAN_MAX,
++ .policy = can_policy,
++ .setup = can_setup,
++ .validate = can_validate,
++ .newlink = can_newlink,
++ .changelink = can_changelink,
++ .dellink = can_dellink,
++ .get_size = can_get_size,
++ .fill_info = can_fill_info,
++ .get_xstats_size = can_get_xstats_size,
++ .fill_xstats = can_fill_xstats,
++};
++
++/* Register the CAN network device */
++int register_candev(struct net_device *dev)
++{
++ struct can_priv *priv = netdev_priv(dev);
++
++ /* Ensure termination_const, termination_const_cnt and
++ * do_set_termination consistency. All must be either set or
++ * unset.
++ */
++ if ((!priv->termination_const != !priv->termination_const_cnt) ||
++ (!priv->termination_const != !priv->do_set_termination))
++ return -EINVAL;
++
++ if (!priv->bitrate_const != !priv->bitrate_const_cnt)
++ return -EINVAL;
++
++ if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt)
++ return -EINVAL;
++
++ dev->rtnl_link_ops = &can_link_ops;
++ netif_carrier_off(dev);
++
++ return register_netdev(dev);
++}
++EXPORT_SYMBOL_GPL(register_candev);
++
++/* Unregister the CAN network device */
++void unregister_candev(struct net_device *dev)
++{
++ unregister_netdev(dev);
++}
++EXPORT_SYMBOL_GPL(unregister_candev);
++
++/* Test if a network device is a candev based device
++ * and return the can_priv* if so.
++ */
++struct can_priv *safe_candev_priv(struct net_device *dev)
++{
++ if (dev->type != ARPHRD_CAN || dev->rtnl_link_ops != &can_link_ops)
++ return NULL;
++
++ return netdev_priv(dev);
++}
++EXPORT_SYMBOL_GPL(safe_candev_priv);
++
++static __init int can_dev_init(void)
++{
++ int err;
++
++ can_led_notifier_init();
++
++ err = rtnl_link_register(&can_link_ops);
++ if (!err)
++ pr_info(MOD_DESC "\n");
++
++ return err;
++}
++module_init(can_dev_init);
++
++static __exit void can_dev_exit(void)
++{
++ rtnl_link_unregister(&can_link_ops);
++
++ can_led_notifier_exit();
++}
++module_exit(can_dev_exit);
++
++MODULE_ALIAS_RTNL_LINK("can");
+diff --git a/drivers/net/can/dev/rx-offload.c b/drivers/net/can/dev/rx-offload.c
+new file mode 100644
+index 0000000000000..3c1912c0430b6
+--- /dev/null
++++ b/drivers/net/can/dev/rx-offload.c
+@@ -0,0 +1,376 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/* Copyright (c) 2014 Protonic Holland,
++ * David Jander
++ * Copyright (C) 2014-2017 Pengutronix,
++ * Marc Kleine-Budde <kernel@pengutronix.de>
++ */
++
++#include <linux/can/dev.h>
++#include <linux/can/rx-offload.h>
++
++struct can_rx_offload_cb {
++ u32 timestamp;
++};
++
++static inline struct can_rx_offload_cb *
++can_rx_offload_get_cb(struct sk_buff *skb)
++{
++ BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));
++
++ return (struct can_rx_offload_cb *)skb->cb;
++}
++
++static inline bool
++can_rx_offload_le(struct can_rx_offload *offload,
++ unsigned int a, unsigned int b)
++{
++ if (offload->inc)
++ return a <= b;
++ else
++ return a >= b;
++}
++
++static inline unsigned int
++can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
++{
++ if (offload->inc)
++ return (*val)++;
++ else
++ return (*val)--;
++}
++
++static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
++{
++ struct can_rx_offload *offload = container_of(napi,
++ struct can_rx_offload,
++ napi);
++ struct net_device *dev = offload->dev;
++ struct net_device_stats *stats = &dev->stats;
++ struct sk_buff *skb;
++ int work_done = 0;
++
++ while ((work_done < quota) &&
++ (skb = skb_dequeue(&offload->skb_queue))) {
++ struct can_frame *cf = (struct can_frame *)skb->data;
++
++ work_done++;
++ stats->rx_packets++;
++ stats->rx_bytes += cf->len;
++ netif_receive_skb(skb);
++ }
++
++ if (work_done < quota) {
++ napi_complete_done(napi, work_done);
++
++ /* Check if there was another interrupt */
++ if (!skb_queue_empty(&offload->skb_queue))
++ napi_reschedule(&offload->napi);
++ }
++
++ can_led_event(offload->dev, CAN_LED_EVENT_RX);
++
++ return work_done;
++}
++
++static inline void
++__skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
++ int (*compare)(struct sk_buff *a, struct sk_buff *b))
++{
++ struct sk_buff *pos, *insert = NULL;
++
++ skb_queue_reverse_walk(head, pos) {
++ const struct can_rx_offload_cb *cb_pos, *cb_new;
++
++ cb_pos = can_rx_offload_get_cb(pos);
++ cb_new = can_rx_offload_get_cb(new);
++
++ netdev_dbg(new->dev,
++ "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n",
++ __func__,
++ cb_pos->timestamp, cb_new->timestamp,
++ cb_new->timestamp - cb_pos->timestamp,
++ skb_queue_len(head));
++
++ if (compare(pos, new) < 0)
++ continue;
++ insert = pos;
++ break;
++ }
++ if (!insert)
++ __skb_queue_head(head, new);
++ else
++ __skb_queue_after(head, insert, new);
++}
++
++static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
++{
++ const struct can_rx_offload_cb *cb_a, *cb_b;
++
++ cb_a = can_rx_offload_get_cb(a);
++ cb_b = can_rx_offload_get_cb(b);
++
++	/* Subtract two u32 timestamps and return the result as an int,
++	 * to keep the difference consistent across the u32 overflow.
++ */
++ return cb_b->timestamp - cb_a->timestamp;
++}
++
++/**
++ * can_rx_offload_offload_one() - Read one CAN frame from HW
++ * @offload: pointer to rx_offload context
++ * @n: number of mailbox to read
++ *
++ * The task of this function is to read a CAN frame from mailbox @n
++ * from the device and return the mailbox's content as a struct
++ * sk_buff.
++ *
++ * If the struct can_rx_offload::skb_queue exceeds the maximal queue
++ * length (struct can_rx_offload::skb_queue_len_max) or no skb can be
++ * allocated, the mailbox contents are discarded by reading them into
++ * an overflow buffer. This way the mailbox is marked as free by the
++ * driver.
++ *
++ * Return: A pointer to skb containing the CAN frame on success.
++ *
++ * NULL if the mailbox @n is empty.
++ *
++ * ERR_PTR() in case of an error
++ */
++static struct sk_buff *
++can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
++{
++ struct sk_buff *skb;
++ struct can_rx_offload_cb *cb;
++ bool drop = false;
++ u32 timestamp;
++
++ /* If queue is full drop frame */
++ if (unlikely(skb_queue_len(&offload->skb_queue) >
++ offload->skb_queue_len_max))
++ drop = true;
++
++ skb = offload->mailbox_read(offload, n, &timestamp, drop);
++ /* Mailbox was empty. */
++ if (unlikely(!skb))
++ return NULL;
++
++ /* There was a problem reading the mailbox, propagate
++ * error value.
++ */
++ if (IS_ERR(skb)) {
++ offload->dev->stats.rx_dropped++;
++ offload->dev->stats.rx_fifo_errors++;
++
++ return skb;
++ }
++
++ /* Mailbox was read. */
++ cb = can_rx_offload_get_cb(skb);
++ cb->timestamp = timestamp;
++
++ return skb;
++}
++
++int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
++ u64 pending)
++{
++ struct sk_buff_head skb_queue;
++ unsigned int i;
++
++ __skb_queue_head_init(&skb_queue);
++
++ for (i = offload->mb_first;
++ can_rx_offload_le(offload, i, offload->mb_last);
++ can_rx_offload_inc(offload, &i)) {
++ struct sk_buff *skb;
++
++ if (!(pending & BIT_ULL(i)))
++ continue;
++
++ skb = can_rx_offload_offload_one(offload, i);
++ if (IS_ERR_OR_NULL(skb))
++ continue;
++
++ __skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
++ }
++
++ if (!skb_queue_empty(&skb_queue)) {
++ unsigned long flags;
++ u32 queue_len;
++
++ spin_lock_irqsave(&offload->skb_queue.lock, flags);
++ skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
++ spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
++
++ queue_len = skb_queue_len(&offload->skb_queue);
++ if (queue_len > offload->skb_queue_len_max / 8)
++ netdev_dbg(offload->dev, "%s: queue_len=%d\n",
++ __func__, queue_len);
++
++ can_rx_offload_schedule(offload);
++ }
++
++ return skb_queue_len(&skb_queue);
++}
++EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);
++
++int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
++{
++ struct sk_buff *skb;
++ int received = 0;
++
++ while (1) {
++ skb = can_rx_offload_offload_one(offload, 0);
++ if (IS_ERR(skb))
++ continue;
++ if (!skb)
++ break;
++
++ skb_queue_tail(&offload->skb_queue, skb);
++ received++;
++ }
++
++ if (received)
++ can_rx_offload_schedule(offload);
++
++ return received;
++}
++EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
++
++int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
++ struct sk_buff *skb, u32 timestamp)
++{
++ struct can_rx_offload_cb *cb;
++ unsigned long flags;
++
++ if (skb_queue_len(&offload->skb_queue) >
++ offload->skb_queue_len_max) {
++ dev_kfree_skb_any(skb);
++ return -ENOBUFS;
++ }
++
++ cb = can_rx_offload_get_cb(skb);
++ cb->timestamp = timestamp;
++
++ spin_lock_irqsave(&offload->skb_queue.lock, flags);
++ __skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
++ spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
++
++ can_rx_offload_schedule(offload);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);
++
++unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
++ unsigned int idx, u32 timestamp)
++{
++ struct net_device *dev = offload->dev;
++ struct net_device_stats *stats = &dev->stats;
++ struct sk_buff *skb;
++ u8 len;
++ int err;
++
++ skb = __can_get_echo_skb(dev, idx, &len);
++ if (!skb)
++ return 0;
++
++ err = can_rx_offload_queue_sorted(offload, skb, timestamp);
++ if (err) {
++ stats->rx_errors++;
++ stats->tx_fifo_errors++;
++ }
++
++ return len;
++}
++EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb);
++
++int can_rx_offload_queue_tail(struct can_rx_offload *offload,
++ struct sk_buff *skb)
++{
++ if (skb_queue_len(&offload->skb_queue) >
++ offload->skb_queue_len_max) {
++ dev_kfree_skb_any(skb);
++ return -ENOBUFS;
++ }
++
++ skb_queue_tail(&offload->skb_queue, skb);
++ can_rx_offload_schedule(offload);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);
++
++static int can_rx_offload_init_queue(struct net_device *dev,
++ struct can_rx_offload *offload,
++ unsigned int weight)
++{
++ offload->dev = dev;
++
++	/* Limit queue len to 4x the weight (rounded to next power of two) */
++ offload->skb_queue_len_max = 2 << fls(weight);
++ offload->skb_queue_len_max *= 4;
++ skb_queue_head_init(&offload->skb_queue);
++
++ netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);
++
++ dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
++ __func__, offload->skb_queue_len_max);
++
++ return 0;
++}
++
++int can_rx_offload_add_timestamp(struct net_device *dev,
++ struct can_rx_offload *offload)
++{
++ unsigned int weight;
++
++ if (offload->mb_first > BITS_PER_LONG_LONG ||
++ offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read)
++ return -EINVAL;
++
++ if (offload->mb_first < offload->mb_last) {
++ offload->inc = true;
++ weight = offload->mb_last - offload->mb_first;
++ } else {
++ offload->inc = false;
++ weight = offload->mb_first - offload->mb_last;
++ }
++
++ return can_rx_offload_init_queue(dev, offload, weight);
++}
++EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);
++
++int can_rx_offload_add_fifo(struct net_device *dev,
++ struct can_rx_offload *offload, unsigned int weight)
++{
++ if (!offload->mailbox_read)
++ return -EINVAL;
++
++ return can_rx_offload_init_queue(dev, offload, weight);
++}
++EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);
++
++int can_rx_offload_add_manual(struct net_device *dev,
++ struct can_rx_offload *offload,
++ unsigned int weight)
++{
++ if (offload->mailbox_read)
++ return -EINVAL;
++
++ return can_rx_offload_init_queue(dev, offload, weight);
++}
++EXPORT_SYMBOL_GPL(can_rx_offload_add_manual);
++
++void can_rx_offload_enable(struct can_rx_offload *offload)
++{
++ napi_enable(&offload->napi);
++}
++EXPORT_SYMBOL_GPL(can_rx_offload_enable);
++
++void can_rx_offload_del(struct can_rx_offload *offload)
++{
++ netif_napi_del(&offload->napi);
++ skb_queue_purge(&offload->skb_queue);
++}
++EXPORT_SYMBOL_GPL(can_rx_offload_del);
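can_rx_offload_compare() in the file added above relies on modular u32 subtraction so that ordering stays correct when the hardware timestamp counter wraps. A runnable sketch of that trick, with values chosen to straddle the wrap:

#include <stdint.h>
#include <stdio.h>

/* Modular u32 subtraction, reinterpreted as a signed value: ordering
 * is preserved across the counter wrap as long as the real distance
 * between the two timestamps is below 2^31. */
static int ts_newer_than(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a);	/* > 0 means b is newer than a */
}

int main(void)
{
	/* 0x00000002 was taken just after the counter wrapped. */
	printf("%d\n", ts_newer_than(0xfffffffeu, 0x00000002u)); /*  4 */
	printf("%d\n", ts_newer_than(0x00000002u, 0xfffffffeu)); /* -4 */
	return 0;
}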
+diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c
+index 4920de09ffb79..aeac3ce7bfc8f 100644
+--- a/drivers/net/can/m_can/tcan4x5x.c
++++ b/drivers/net/can/m_can/tcan4x5x.c
+@@ -88,7 +88,7 @@
+
+ #define TCAN4X5X_MRAM_START 0x8000
+ #define TCAN4X5X_MCAN_OFFSET 0x1000
+-#define TCAN4X5X_MAX_REGISTER 0x8fff
++#define TCAN4X5X_MAX_REGISTER 0x8ffc
+
+ #define TCAN4X5X_CLEAR_ALL_INT 0xffffffff
+ #define TCAN4X5X_SET_ALL_INT 0xffffffff
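The one-line TCAN4X5X_MAX_REGISTER change above is pure arithmetic: with 32-bit registers, the last valid register address is the top of the window minus one register width, not minus one byte. A trivial runnable check (the 0x9000 window top is inferred from the old 0x8fff value and is an assumption):

#include <stdio.h>

int main(void)
{
	unsigned int window_top = 0x9000;	/* first address past the map */
	unsigned int reg_width = 4;		/* 32-bit registers */

	/* The last register starts one register width below the top. */
	printf("max_register = 0x%04x\n", window_top - reg_width); /* 0x8ffc */
	return 0;
}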
+diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
+deleted file mode 100644
+index 3c1912c0430b6..0000000000000
+--- a/drivers/net/can/rx-offload.c
++++ /dev/null
+@@ -1,376 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-only
+-/* Copyright (c) 2014 Protonic Holland,
+- * David Jander
+- * Copyright (C) 2014-2017 Pengutronix,
+- * Marc Kleine-Budde <kernel@pengutronix.de>
+- */
+-
+-#include <linux/can/dev.h>
+-#include <linux/can/rx-offload.h>
+-
+-struct can_rx_offload_cb {
+- u32 timestamp;
+-};
+-
+-static inline struct can_rx_offload_cb *
+-can_rx_offload_get_cb(struct sk_buff *skb)
+-{
+- BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));
+-
+- return (struct can_rx_offload_cb *)skb->cb;
+-}
+-
+-static inline bool
+-can_rx_offload_le(struct can_rx_offload *offload,
+- unsigned int a, unsigned int b)
+-{
+- if (offload->inc)
+- return a <= b;
+- else
+- return a >= b;
+-}
+-
+-static inline unsigned int
+-can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
+-{
+- if (offload->inc)
+- return (*val)++;
+- else
+- return (*val)--;
+-}
+-
+-static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
+-{
+- struct can_rx_offload *offload = container_of(napi,
+- struct can_rx_offload,
+- napi);
+- struct net_device *dev = offload->dev;
+- struct net_device_stats *stats = &dev->stats;
+- struct sk_buff *skb;
+- int work_done = 0;
+-
+- while ((work_done < quota) &&
+- (skb = skb_dequeue(&offload->skb_queue))) {
+- struct can_frame *cf = (struct can_frame *)skb->data;
+-
+- work_done++;
+- stats->rx_packets++;
+- stats->rx_bytes += cf->len;
+- netif_receive_skb(skb);
+- }
+-
+- if (work_done < quota) {
+- napi_complete_done(napi, work_done);
+-
+- /* Check if there was another interrupt */
+- if (!skb_queue_empty(&offload->skb_queue))
+- napi_reschedule(&offload->napi);
+- }
+-
+- can_led_event(offload->dev, CAN_LED_EVENT_RX);
+-
+- return work_done;
+-}
+-
+-static inline void
+-__skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
+- int (*compare)(struct sk_buff *a, struct sk_buff *b))
+-{
+- struct sk_buff *pos, *insert = NULL;
+-
+- skb_queue_reverse_walk(head, pos) {
+- const struct can_rx_offload_cb *cb_pos, *cb_new;
+-
+- cb_pos = can_rx_offload_get_cb(pos);
+- cb_new = can_rx_offload_get_cb(new);
+-
+- netdev_dbg(new->dev,
+- "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n",
+- __func__,
+- cb_pos->timestamp, cb_new->timestamp,
+- cb_new->timestamp - cb_pos->timestamp,
+- skb_queue_len(head));
+-
+- if (compare(pos, new) < 0)
+- continue;
+- insert = pos;
+- break;
+- }
+- if (!insert)
+- __skb_queue_head(head, new);
+- else
+- __skb_queue_after(head, insert, new);
+-}
+-
+-static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
+-{
+- const struct can_rx_offload_cb *cb_a, *cb_b;
+-
+- cb_a = can_rx_offload_get_cb(a);
+- cb_b = can_rx_offload_get_cb(b);
+-
+-	/* Subtract two u32 timestamps and return the result as an int,
+-	 * to keep the difference consistent across the u32 overflow.
+- */
+- return cb_b->timestamp - cb_a->timestamp;
+-}
+-
+-/**
+- * can_rx_offload_offload_one() - Read one CAN frame from HW
+- * @offload: pointer to rx_offload context
+- * @n: number of mailbox to read
+- *
+- * The task of this function is to read a CAN frame from mailbox @n
+- * from the device and return the mailbox's content as a struct
+- * sk_buff.
+- *
+- * If the struct can_rx_offload::skb_queue exceeds the maximal queue
+- * length (struct can_rx_offload::skb_queue_len_max) or no skb can be
+- * allocated, the mailbox contents are discarded by reading them into
+- * an overflow buffer. This way the mailbox is marked as free by the
+- * driver.
+- *
+- * Return: A pointer to skb containing the CAN frame on success.
+- *
+- * NULL if the mailbox @n is empty.
+- *
+- * ERR_PTR() in case of an error
+- */
+-static struct sk_buff *
+-can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
+-{
+- struct sk_buff *skb;
+- struct can_rx_offload_cb *cb;
+- bool drop = false;
+- u32 timestamp;
+-
+- /* If queue is full drop frame */
+- if (unlikely(skb_queue_len(&offload->skb_queue) >
+- offload->skb_queue_len_max))
+- drop = true;
+-
+- skb = offload->mailbox_read(offload, n, &timestamp, drop);
+- /* Mailbox was empty. */
+- if (unlikely(!skb))
+- return NULL;
+-
+- /* There was a problem reading the mailbox, propagate
+- * error value.
+- */
+- if (IS_ERR(skb)) {
+- offload->dev->stats.rx_dropped++;
+- offload->dev->stats.rx_fifo_errors++;
+-
+- return skb;
+- }
+-
+- /* Mailbox was read. */
+- cb = can_rx_offload_get_cb(skb);
+- cb->timestamp = timestamp;
+-
+- return skb;
+-}
+-
+-int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
+- u64 pending)
+-{
+- struct sk_buff_head skb_queue;
+- unsigned int i;
+-
+- __skb_queue_head_init(&skb_queue);
+-
+- for (i = offload->mb_first;
+- can_rx_offload_le(offload, i, offload->mb_last);
+- can_rx_offload_inc(offload, &i)) {
+- struct sk_buff *skb;
+-
+- if (!(pending & BIT_ULL(i)))
+- continue;
+-
+- skb = can_rx_offload_offload_one(offload, i);
+- if (IS_ERR_OR_NULL(skb))
+- continue;
+-
+- __skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
+- }
+-
+- if (!skb_queue_empty(&skb_queue)) {
+- unsigned long flags;
+- u32 queue_len;
+-
+- spin_lock_irqsave(&offload->skb_queue.lock, flags);
+- skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
+- spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
+-
+- queue_len = skb_queue_len(&offload->skb_queue);
+- if (queue_len > offload->skb_queue_len_max / 8)
+- netdev_dbg(offload->dev, "%s: queue_len=%d\n",
+- __func__, queue_len);
+-
+- can_rx_offload_schedule(offload);
+- }
+-
+- return skb_queue_len(&skb_queue);
+-}
+-EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);
+-
+-int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
+-{
+- struct sk_buff *skb;
+- int received = 0;
+-
+- while (1) {
+- skb = can_rx_offload_offload_one(offload, 0);
+- if (IS_ERR(skb))
+- continue;
+- if (!skb)
+- break;
+-
+- skb_queue_tail(&offload->skb_queue, skb);
+- received++;
+- }
+-
+- if (received)
+- can_rx_offload_schedule(offload);
+-
+- return received;
+-}
+-EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
+-
+-int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
+- struct sk_buff *skb, u32 timestamp)
+-{
+- struct can_rx_offload_cb *cb;
+- unsigned long flags;
+-
+- if (skb_queue_len(&offload->skb_queue) >
+- offload->skb_queue_len_max) {
+- dev_kfree_skb_any(skb);
+- return -ENOBUFS;
+- }
+-
+- cb = can_rx_offload_get_cb(skb);
+- cb->timestamp = timestamp;
+-
+- spin_lock_irqsave(&offload->skb_queue.lock, flags);
+- __skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
+- spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
+-
+- can_rx_offload_schedule(offload);
+-
+- return 0;
+-}
+-EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);
+-
+-unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
+- unsigned int idx, u32 timestamp)
+-{
+- struct net_device *dev = offload->dev;
+- struct net_device_stats *stats = &dev->stats;
+- struct sk_buff *skb;
+- u8 len;
+- int err;
+-
+- skb = __can_get_echo_skb(dev, idx, &len);
+- if (!skb)
+- return 0;
+-
+- err = can_rx_offload_queue_sorted(offload, skb, timestamp);
+- if (err) {
+- stats->rx_errors++;
+- stats->tx_fifo_errors++;
+- }
+-
+- return len;
+-}
+-EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb);
+-
+-int can_rx_offload_queue_tail(struct can_rx_offload *offload,
+- struct sk_buff *skb)
+-{
+- if (skb_queue_len(&offload->skb_queue) >
+- offload->skb_queue_len_max) {
+- dev_kfree_skb_any(skb);
+- return -ENOBUFS;
+- }
+-
+- skb_queue_tail(&offload->skb_queue, skb);
+- can_rx_offload_schedule(offload);
+-
+- return 0;
+-}
+-EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);
+-
+-static int can_rx_offload_init_queue(struct net_device *dev,
+- struct can_rx_offload *offload,
+- unsigned int weight)
+-{
+- offload->dev = dev;
+-
+-	/* Limit queue len to 4x the weight (rounded to next power of two) */
+- offload->skb_queue_len_max = 2 << fls(weight);
+- offload->skb_queue_len_max *= 4;
+- skb_queue_head_init(&offload->skb_queue);
+-
+- netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);
+-
+- dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
+- __func__, offload->skb_queue_len_max);
+-
+- return 0;
+-}
+-
+-int can_rx_offload_add_timestamp(struct net_device *dev,
+- struct can_rx_offload *offload)
+-{
+- unsigned int weight;
+-
+- if (offload->mb_first > BITS_PER_LONG_LONG ||
+- offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read)
+- return -EINVAL;
+-
+- if (offload->mb_first < offload->mb_last) {
+- offload->inc = true;
+- weight = offload->mb_last - offload->mb_first;
+- } else {
+- offload->inc = false;
+- weight = offload->mb_first - offload->mb_last;
+- }
+-
+- return can_rx_offload_init_queue(dev, offload, weight);
+-}
+-EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);
+-
+-int can_rx_offload_add_fifo(struct net_device *dev,
+- struct can_rx_offload *offload, unsigned int weight)
+-{
+- if (!offload->mailbox_read)
+- return -EINVAL;
+-
+- return can_rx_offload_init_queue(dev, offload, weight);
+-}
+-EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);
+-
+-int can_rx_offload_add_manual(struct net_device *dev,
+- struct can_rx_offload *offload,
+- unsigned int weight)
+-{
+- if (offload->mailbox_read)
+- return -EINVAL;
+-
+- return can_rx_offload_init_queue(dev, offload, weight);
+-}
+-EXPORT_SYMBOL_GPL(can_rx_offload_add_manual);
+-
+-void can_rx_offload_enable(struct can_rx_offload *offload)
+-{
+- napi_enable(&offload->napi);
+-}
+-EXPORT_SYMBOL_GPL(can_rx_offload_enable);
+-
+-void can_rx_offload_del(struct can_rx_offload *offload)
+-{
+- netif_napi_del(&offload->napi);
+- skb_queue_purge(&offload->skb_queue);
+-}
+-EXPORT_SYMBOL_GPL(can_rx_offload_del);
+diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
+index a1bd1be09548d..30c8d53c9745d 100644
+--- a/drivers/net/can/slcan.c
++++ b/drivers/net/can/slcan.c
+@@ -516,6 +516,7 @@ static struct slcan *slc_alloc(void)
+ int i;
+ char name[IFNAMSIZ];
+ struct net_device *dev = NULL;
++ struct can_ml_priv *can_ml;
+ struct slcan *sl;
+ int size;
+
+@@ -538,7 +539,8 @@ static struct slcan *slc_alloc(void)
+
+ dev->base_addr = i;
+ sl = netdev_priv(dev);
+- dev->ml_priv = (void *)sl + ALIGN(sizeof(*sl), NETDEV_ALIGN);
++ can_ml = (void *)sl + ALIGN(sizeof(*sl), NETDEV_ALIGN);
++ can_set_ml_priv(dev, can_ml);
+
+ /* Initialize channel control data */
+ sl->magic = SLCAN_MAGIC;
+diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
+index 39ca14b0585dc..067705e2850b3 100644
+--- a/drivers/net/can/vcan.c
++++ b/drivers/net/can/vcan.c
+@@ -153,7 +153,7 @@ static void vcan_setup(struct net_device *dev)
+ dev->addr_len = 0;
+ dev->tx_queue_len = 0;
+ dev->flags = IFF_NOARP;
+- dev->ml_priv = netdev_priv(dev);
++ can_set_ml_priv(dev, netdev_priv(dev));
+
+ /* set flags according to driver capabilities */
+ if (echo)
+diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
+index f9a524c5f6d62..8861a7d875e7e 100644
+--- a/drivers/net/can/vxcan.c
++++ b/drivers/net/can/vxcan.c
+@@ -141,6 +141,8 @@ static const struct net_device_ops vxcan_netdev_ops = {
+
+ static void vxcan_setup(struct net_device *dev)
+ {
++ struct can_ml_priv *can_ml;
++
+ dev->type = ARPHRD_CAN;
+ dev->mtu = CANFD_MTU;
+ dev->hard_header_len = 0;
+@@ -149,7 +151,9 @@ static void vxcan_setup(struct net_device *dev)
+ dev->flags = (IFF_NOARP|IFF_ECHO);
+ dev->netdev_ops = &vxcan_netdev_ops;
+ dev->needs_free_netdev = true;
+- dev->ml_priv = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN);
++
++ can_ml = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN);
++ can_set_ml_priv(dev, can_ml);
+ }
+
+ /* forward declaration for rtnl_create_link() */
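The slcan/vcan/vxcan hunks above all switch to can_set_ml_priv(), with the CAN ML area carved out right after the driver's own private struct, rounded up to the next NETDEV_ALIGN boundary. A runnable sketch of that offset computation (the demo struct and its size are made up; NETDEV_ALIGN being 32 is taken here as an assumption):

#include <stdio.h>
#include <stdint.h>

/* Same rounding as the kernel's ALIGN(): round x up to the next
 * multiple of a, where a is a power of two. */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

struct demo_priv { char pad[37]; };	/* odd size on purpose */

int main(void)
{
	size_t off = ALIGN_UP(sizeof(struct demo_priv), 32);

	printf("priv size %zu -> ml_priv offset %zu\n",
	       sizeof(struct demo_priv), off);	/* 37 -> 64 */
	return 0;
}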
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+index 8f70a3909929a..4af0cd9530de6 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+@@ -71,8 +71,10 @@ static int aq_ndev_open(struct net_device *ndev)
+ goto err_exit;
+
+ err = aq_nic_start(aq_nic);
+- if (err < 0)
++ if (err < 0) {
++ aq_nic_stop(aq_nic);
+ goto err_exit;
++ }
+
+ err_exit:
+ if (err < 0)
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+index 358119d983582..e6f9b5345b70b 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+@@ -1153,7 +1153,7 @@ static void mvpp2_interrupts_unmask(void *arg)
+ u32 val;
+
+ /* If the thread isn't used, don't do anything */
+- if (smp_processor_id() > port->priv->nthreads)
++ if (smp_processor_id() >= port->priv->nthreads)
+ return;
+
+ val = MVPP2_CAUSE_MISC_SUM_MASK |
+@@ -2287,7 +2287,7 @@ static void mvpp2_txq_sent_counter_clear(void *arg)
+ int queue;
+
+ /* If the thread isn't used, don't do anything */
+- if (smp_processor_id() > port->priv->nthreads)
++ if (smp_processor_id() >= port->priv->nthreads)
+ return;
+
+ for (queue = 0; queue < port->ntxqs; queue++) {
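The two mvpp2 fixes above are the same classic off-by-one: thread IDs are zero-based, so the "thread isn't used" bail-out must compare with >= against nthreads, otherwise the first out-of-range ID slips through. A tiny runnable illustration:

#include <stdio.h>

#define NTHREADS 4	/* valid IDs are 0..3 */

int main(void)
{
	for (int id = 3; id <= 5; id++)
		printf("id=%d  skip('>')=%d  skip('>=')=%d\n",
		       id, id > NTHREADS, id >= NTHREADS);
	/* id == 4 is already out of range, but '>' lets it through */
	return 0;
}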
+diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
+index b77f5fef7aeca..febfac75dd6a1 100644
+--- a/drivers/net/ipa/gsi.c
++++ b/drivers/net/ipa/gsi.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+
+ /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+- * Copyright (C) 2018-2020 Linaro Ltd.
++ * Copyright (C) 2018-2021 Linaro Ltd.
+ */
+
+ #include <linux/types.h>
+@@ -195,8 +195,6 @@ static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
+ /* Turn off all GSI interrupts initially */
+ static void gsi_irq_setup(struct gsi *gsi)
+ {
+- u32 adjust;
+-
+ /* Disable all interrupt types */
+ gsi_irq_type_update(gsi, 0);
+
+@@ -206,10 +204,9 @@ static void gsi_irq_setup(struct gsi *gsi)
+ iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+
+- /* Reverse the offset adjustment for inter-EE register offsets */
+- adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
+- iowrite32(0, gsi->virt + adjust + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
+- iowrite32(0, gsi->virt + adjust + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET);
++ /* The inter-EE registers are in the non-adjusted address range */
++ iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
++ iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET);
+
+ iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
+ }
+@@ -2115,9 +2112,8 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev,
+ gsi->dev = dev;
+ gsi->version = version;
+
+- /* The GSI layer performs NAPI on all endpoints. NAPI requires a
+- * network device structure, but the GSI layer does not have one,
+- * so we must create a dummy network device for this purpose.
++ /* GSI uses NAPI on all channels. Create a dummy network device
++ * for the channel NAPI contexts to be associated with.
+ */
+ init_dummy_netdev(&gsi->dummy_dev);
+
+@@ -2142,13 +2138,13 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev,
+ return -EINVAL;
+ }
+
+- gsi->virt = ioremap(res->start, size);
+- if (!gsi->virt) {
++ gsi->virt_raw = ioremap(res->start, size);
++ if (!gsi->virt_raw) {
+ dev_err(dev, "unable to remap \"gsi\" memory\n");
+ return -ENOMEM;
+ }
+- /* Adjust register range pointer downward for newer IPA versions */
+- gsi->virt -= adjust;
++ /* Most registers are accessed using an adjusted register range */
++ gsi->virt = gsi->virt_raw - adjust;
+
+ init_completion(&gsi->completion);
+
+@@ -2167,7 +2163,7 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev,
+ err_irq_exit:
+ gsi_irq_exit(gsi);
+ err_iounmap:
+- iounmap(gsi->virt);
++ iounmap(gsi->virt_raw);
+
+ return ret;
+ }
+@@ -2178,7 +2174,7 @@ void gsi_exit(struct gsi *gsi)
+ mutex_destroy(&gsi->mutex);
+ gsi_channel_exit(gsi);
+ gsi_irq_exit(gsi);
+- iounmap(gsi->virt);
++ iounmap(gsi->virt_raw);
+ }
+
+ /* The maximum number of outstanding TREs on a channel. This limits
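The gsi.c changes above keep two base pointers: virt_raw for the handful of inter-EE registers, and virt, shifted down by GSI_EE_REG_ADJUST, so the pre-shifted offsets in gsi_reg.h can be used unchanged for everything else. A runnable userspace analogue of that pointer bookkeeping (integer arithmetic stands in for the __iomem cookie; the second offset is illustrative):

#include <stdio.h>
#include <stdint.h>

#define REG_ADJUST 0xd000u	/* mirrors GSI_EE_REG_ADJUST */

int main(void)
{
	static unsigned char window[0x10000];	/* stand-in for the ioremap()ed range */
	uintptr_t virt_raw = (uintptr_t)window;
	uintptr_t virt = virt_raw - REG_ADJUST;	/* pre-adjusted base */

	unsigned int inter_ee_off = 0xc018;	/* relative to virt_raw */
	unsigned int other_off = 0xd100;	/* illustrative, relative to virt */

	/* Both accesses land inside the mapped window: */
	printf("inter-EE at +0x%lx\n",
	       (unsigned long)(virt_raw + inter_ee_off - (uintptr_t)window));
	printf("adjusted at +0x%lx\n",
	       (unsigned long)(virt + other_off - (uintptr_t)window));
	return 0;
}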
+diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
+index 96c9aed397aad..696c9825834ab 100644
+--- a/drivers/net/ipa/gsi.h
++++ b/drivers/net/ipa/gsi.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+
+ /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+- * Copyright (C) 2018-2020 Linaro Ltd.
++ * Copyright (C) 2018-2021 Linaro Ltd.
+ */
+ #ifndef _GSI_H_
+ #define _GSI_H_
+@@ -150,7 +150,8 @@ struct gsi {
+ struct device *dev; /* Same as IPA device */
+ enum ipa_version version;
+ struct net_device dummy_dev; /* needed for NAPI */
+- void __iomem *virt;
++ void __iomem *virt_raw; /* I/O mapped address range */
++ void __iomem *virt; /* Adjusted for most registers */
+ u32 irq;
+ u32 channel_count;
+ u32 evt_ring_count;
+diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
+index 0e138bbd82053..1622d8cf8dea4 100644
+--- a/drivers/net/ipa/gsi_reg.h
++++ b/drivers/net/ipa/gsi_reg.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+
+ /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+- * Copyright (C) 2018-2020 Linaro Ltd.
++ * Copyright (C) 2018-2021 Linaro Ltd.
+ */
+ #ifndef _GSI_REG_H_
+ #define _GSI_REG_H_
+@@ -38,17 +38,21 @@
+ * (though the actual limit is hardware-dependent).
+ */
+
+-/* GSI EE registers as a group are shifted downward by a fixed
+- * constant amount for IPA versions 4.5 and beyond. This applies
+- * to all GSI registers we use *except* the ones that disable
+- * inter-EE interrupts for channels and event channels.
++/* GSI EE registers as a group are shifted downward by a fixed constant amount
++ * for IPA versions 4.5 and beyond. This applies to all GSI registers we use
++ * *except* the ones that disable inter-EE interrupts for channels and event
++ * channels.
+ *
+- * We handle this by adjusting the pointer to the mapped GSI memory
+- * region downward. Then in the one place we use them (gsi_irq_setup())
+- * we undo that adjustment for the inter-EE interrupt registers.
++ * The "raw" (not adjusted) GSI register range is mapped, and a pointer to
++ * the mapped range is held in gsi->virt_raw. The inter-EE interrupt
++ * registers are accessed using that pointer.
++ *
++ * Most registers are accessed using gsi->virt, which is a copy of the "raw"
++ * pointer, adjusted downward by the fixed amount.
+ */
+ #define GSI_EE_REG_ADJUST 0x0000d000 /* IPA v4.5+ */
+
++/* The two inter-EE IRQ register offsets are relative to gsi->virt_raw */
+ #define GSI_INTER_EE_SRC_CH_IRQ_OFFSET \
+ GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(GSI_EE_AP)
+ #define GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(ee) \
+@@ -59,16 +63,7 @@
+ #define GSI_INTER_EE_N_SRC_EV_CH_IRQ_OFFSET(ee) \
+ (0x0000c01c + 0x1000 * (ee))
+
+-#define GSI_INTER_EE_SRC_CH_IRQ_CLR_OFFSET \
+- GSI_INTER_EE_N_SRC_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
+-#define GSI_INTER_EE_N_SRC_CH_IRQ_CLR_OFFSET(ee) \
+- (0x0000c028 + 0x1000 * (ee))
+-
+-#define GSI_INTER_EE_SRC_EV_CH_IRQ_CLR_OFFSET \
+- GSI_INTER_EE_N_SRC_EV_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
+-#define GSI_INTER_EE_N_SRC_EV_CH_IRQ_CLR_OFFSET(ee) \
+- (0x0000c02c + 0x1000 * (ee))
+-
++/* All other register offsets are relative to gsi->virt */
+ #define GSI_CH_C_CNTXT_0_OFFSET(ch) \
+ GSI_EE_N_CH_C_CNTXT_0_OFFSET((ch), GSI_EE_AP)
+ #define GSI_EE_N_CH_C_CNTXT_0_OFFSET(ch, ee) \
+diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
+index 002e514485100..eb65a11e33eaf 100644
+--- a/drivers/net/ipa/ipa_cmd.c
++++ b/drivers/net/ipa/ipa_cmd.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+
+ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+- * Copyright (C) 2019-2020 Linaro Ltd.
++ * Copyright (C) 2019-2021 Linaro Ltd.
+ */
+
+ #include <linux/types.h>
+@@ -244,11 +244,15 @@ static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
+ if (ipa->version != IPA_VERSION_3_5_1)
+ bit_count += hweight32(REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
+ BUILD_BUG_ON(bit_count > 32);
+- offset_max = ~0 >> (32 - bit_count);
++ offset_max = ~0U >> (32 - bit_count);
+
++ /* Make sure the offset can be represented by the field(s)
++ * that holds it. Also make sure the offset is not outside
++ * the overall IPA memory range.
++ */
+ if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
+ dev_err(dev, "%s offset too large 0x%04x + 0x%04x > 0x%04x)\n",
+- ipa->mem_offset + offset, offset_max);
++ name, ipa->mem_offset, offset, offset_max);
+ return false;
+ }
+
+@@ -261,12 +265,24 @@ static bool ipa_cmd_register_write_valid(struct ipa *ipa)
+ const char *name;
+ u32 offset;
+
+- offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
+- name = "filter/route hash flush";
+- if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
+- return false;
++ /* If hashed tables are supported, ensure the hash flush register
++ * offset will fit in a register write IPA immediate command.
++ */
++ if (ipa->version != IPA_VERSION_4_2) {
++ offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
++ name = "filter/route hash flush";
++ if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
++ return false;
++ }
+
+- offset = IPA_REG_ENDP_STATUS_N_OFFSET(IPA_ENDPOINT_COUNT);
++ /* Each endpoint can have a status endpoint associated with it,
++ * and this is recorded in an endpoint register. If the modem
++ * crashes, we reset the status endpoint for all modem endpoints
++ * using a register write IPA immediate command. Make sure the
++ * worst case (highest endpoint number) offset of that endpoint
++ * fits in the register write command field(s) that must hold it.
++ */
++ offset = IPA_REG_ENDP_STATUS_N_OFFSET(IPA_ENDPOINT_COUNT - 1);
+ name = "maximal endpoint status";
+ if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
+ return false;
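The ~0 -> ~0U change above matters because ~0 is a signed int: on common ABIs a right shift of a negative value is arithmetic and keeps smearing the sign bit, so the computed maximum stays all-ones and the range check never fires. A runnable demonstration:

#include <stdio.h>

int main(void)
{
	int bit_count = 20;

	/* ~0 is (int)-1; the arithmetic shift keeps it all-ones on
	 * typical platforms. ~0U forces a logical shift instead. */
	unsigned int buggy = ~0 >> (32 - bit_count);
	unsigned int fixed = ~0U >> (32 - bit_count);

	printf("buggy=0x%08x fixed=0x%08x\n", buggy, fixed);
	/* prints: buggy=0xffffffff fixed=0x000fffff */
	return 0;
}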
+diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
+index 816af1f55e2cd..dbeb29fa16e81 100644
+--- a/drivers/net/netdevsim/dev.c
++++ b/drivers/net/netdevsim/dev.c
+@@ -1012,23 +1012,25 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
+ nsim_dev->fw_update_status = true;
+ nsim_dev->fw_update_overwrite_mask = 0;
+
+- nsim_dev->fib_data = nsim_fib_create(devlink, extack);
+- if (IS_ERR(nsim_dev->fib_data))
+- return PTR_ERR(nsim_dev->fib_data);
+-
+ nsim_devlink_param_load_driverinit_values(devlink);
+
+ err = nsim_dev_dummy_region_init(nsim_dev, devlink);
+ if (err)
+- goto err_fib_destroy;
++ return err;
+
+ err = nsim_dev_traps_init(devlink);
+ if (err)
+ goto err_dummy_region_exit;
+
++ nsim_dev->fib_data = nsim_fib_create(devlink, extack);
++ if (IS_ERR(nsim_dev->fib_data)) {
++ err = PTR_ERR(nsim_dev->fib_data);
++ goto err_traps_exit;
++ }
++
+ err = nsim_dev_health_init(nsim_dev, devlink);
+ if (err)
+- goto err_traps_exit;
++ goto err_fib_destroy;
+
+ err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
+ if (err)
+@@ -1043,12 +1045,12 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
+
+ err_health_exit:
+ nsim_dev_health_exit(nsim_dev);
++err_fib_destroy:
++ nsim_fib_destroy(devlink, nsim_dev->fib_data);
+ err_traps_exit:
+ nsim_dev_traps_exit(devlink);
+ err_dummy_region_exit:
+ nsim_dev_dummy_region_exit(nsim_dev);
+-err_fib_destroy:
+- nsim_fib_destroy(devlink, nsim_dev->fib_data);
+ return err;
+ }
+
+@@ -1080,15 +1082,9 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
+ if (err)
+ goto err_devlink_free;
+
+- nsim_dev->fib_data = nsim_fib_create(devlink, NULL);
+- if (IS_ERR(nsim_dev->fib_data)) {
+- err = PTR_ERR(nsim_dev->fib_data);
+- goto err_resources_unregister;
+- }
+-
+ err = devlink_register(devlink, &nsim_bus_dev->dev);
+ if (err)
+- goto err_fib_destroy;
++ goto err_resources_unregister;
+
+ err = devlink_params_register(devlink, nsim_devlink_params,
+ ARRAY_SIZE(nsim_devlink_params));
+@@ -1108,9 +1104,15 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
+ if (err)
+ goto err_traps_exit;
+
++ nsim_dev->fib_data = nsim_fib_create(devlink, NULL);
++ if (IS_ERR(nsim_dev->fib_data)) {
++ err = PTR_ERR(nsim_dev->fib_data);
++ goto err_debugfs_exit;
++ }
++
+ err = nsim_dev_health_init(nsim_dev, devlink);
+ if (err)
+- goto err_debugfs_exit;
++ goto err_fib_destroy;
+
+ err = nsim_bpf_dev_init(nsim_dev);
+ if (err)
+@@ -1128,6 +1130,8 @@ err_bpf_dev_exit:
+ nsim_bpf_dev_exit(nsim_dev);
+ err_health_exit:
+ nsim_dev_health_exit(nsim_dev);
++err_fib_destroy:
++ nsim_fib_destroy(devlink, nsim_dev->fib_data);
+ err_debugfs_exit:
+ nsim_dev_debugfs_exit(nsim_dev);
+ err_traps_exit:
+@@ -1139,8 +1143,6 @@ err_params_unregister:
+ ARRAY_SIZE(nsim_devlink_params));
+ err_dl_unregister:
+ devlink_unregister(devlink);
+-err_fib_destroy:
+- nsim_fib_destroy(devlink, nsim_dev->fib_data);
+ err_resources_unregister:
+ devlink_resources_unregister(devlink, NULL);
+ err_devlink_free:
+@@ -1157,10 +1159,10 @@ static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev)
+ debugfs_remove(nsim_dev->take_snapshot);
+ nsim_dev_port_del_all(nsim_dev);
+ nsim_dev_health_exit(nsim_dev);
++ nsim_fib_destroy(devlink, nsim_dev->fib_data);
+ nsim_dev_traps_exit(devlink);
+ nsim_dev_dummy_region_exit(nsim_dev);
+ mutex_destroy(&nsim_dev->port_list_lock);
+- nsim_fib_destroy(devlink, nsim_dev->fib_data);
+ }
+
+ void nsim_dev_remove(struct nsim_bus_dev *nsim_bus_dev)
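The netdevsim reordering above restores the rule that error labels unwind in exactly the reverse order of initialization, with each label undoing only what had already succeeded. A minimal runnable shape of that pattern (the functions are placeholders, not driver code):

#include <stdio.h>

static int init_a(void) { puts("init a"); return 0; }
static int init_b(void) { puts("init b"); return -1; }	/* fails */
static void exit_a(void) { puts("exit a"); }

static int probe(void)
{
	int err;

	err = init_a();
	if (err)
		return err;

	err = init_b();
	if (err)
		goto err_a;	/* unwind only what already succeeded */

	return 0;

err_a:
	exit_a();
	return err;
}

int main(void)
{
	return probe() ? 1 : 0;
}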
+diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
+index 93c7e8502845f..ebb568f9bc667 100644
+--- a/drivers/net/wan/lmc/lmc_main.c
++++ b/drivers/net/wan/lmc/lmc_main.c
+@@ -899,6 +899,8 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ break;
+ default:
+ printk(KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name);
++ unregister_hdlc_device(dev);
++ return -EIO;
+ break;
+ }
+
+diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+index e6135795719a1..e7072fc4f487a 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+@@ -576,13 +576,13 @@ static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
+ case WMI_TDLS_TEARDOWN_REASON_TX:
+ case WMI_TDLS_TEARDOWN_REASON_RSSI:
+ case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT:
++ rcu_read_lock();
+ station = ieee80211_find_sta_by_ifaddr(ar->hw,
+ ev->peer_macaddr.addr,
+ NULL);
+ if (!station) {
+ ath10k_warn(ar, "did not find station from tdls peer event");
+- kfree(tb);
+- return;
++ goto exit;
+ }
+ arvif = ath10k_get_arvif(ar, __le32_to_cpu(ev->vdev_id));
+ ieee80211_tdls_oper_request(
+@@ -593,6 +593,9 @@ static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
+ );
+ break;
+ }
++
++exit:
++ rcu_read_unlock();
+ kfree(tb);
+ }
+
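The ath10k fix above wraps the station lookup in an RCU read-side section and funnels the not-found path through a single exit label, so the unlock and the kfree always run. A runnable userspace sketch of that single-exit cleanup shape (the RCU lock itself is only indicated in comments; the buffer stands in for the parsed event TLVs):

#include <stdio.h>
#include <stdlib.h>

static int lookup_ok;	/* toggled by the caller below */

static int handle_event(void)
{
	int ret = 0;
	char *tb = malloc(16);	/* stands in for the parsed event TLVs */

	if (!tb)
		return -1;

	/* rcu_read_lock() would be taken here in the driver */
	if (!lookup_ok) {	/* station not found */
		ret = -2;
		goto exit;	/* cleanup below still runs */
	}
	/* ... the looked-up object is only used before 'exit:' ... */
exit:
	/* rcu_read_unlock() would be dropped here in the driver */
	free(tb);
	return ret;
}

int main(void)
{
	lookup_ok = 1;
	printf("%d\n", handle_event());
	lookup_ok = 0;
	printf("%d\n", handle_event());
	return 0;
}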
+diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
+index 54bdef33f3f85..55ecf7f437354 100644
+--- a/drivers/net/wireless/ath/ath11k/mac.c
++++ b/drivers/net/wireless/ath/ath11k/mac.c
+@@ -6361,17 +6361,20 @@ static int __ath11k_mac_register(struct ath11k *ar)
+ ret = ath11k_regd_update(ar, true);
+ if (ret) {
+ ath11k_err(ar->ab, "ath11k regd update failed: %d\n", ret);
+- goto err_free_if_combs;
++ goto err_unregister_hw;
+ }
+
+ ret = ath11k_debugfs_register(ar);
+ if (ret) {
+ ath11k_err(ar->ab, "debugfs registration failed: %d\n", ret);
+- goto err_free_if_combs;
++ goto err_unregister_hw;
+ }
+
+ return 0;
+
++err_unregister_hw:
++ ieee80211_unregister_hw(ar->hw);
++
+ err_free_if_combs:
+ kfree(ar->hw->wiphy->iface_combinations[0].limits);
+ kfree(ar->hw->wiphy->iface_combinations);
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+index 0ee421f30aa24..23e6422c2251b 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+@@ -5611,7 +5611,8 @@ static bool brcmf_is_linkup(struct brcmf_cfg80211_vif *vif,
+ return false;
+ }
+
+-static bool brcmf_is_linkdown(const struct brcmf_event_msg *e)
++static bool brcmf_is_linkdown(struct brcmf_cfg80211_vif *vif,
++ const struct brcmf_event_msg *e)
+ {
+ u32 event = e->event_code;
+ u16 flags = e->flags;
+@@ -5620,6 +5621,8 @@ static bool brcmf_is_linkdown(const struct brcmf_event_msg *e)
+ (event == BRCMF_E_DISASSOC_IND) ||
+ ((event == BRCMF_E_LINK) && (!(flags & BRCMF_EVENT_MSG_LINK)))) {
+ brcmf_dbg(CONN, "Processing link down\n");
++ clear_bit(BRCMF_VIF_STATUS_EAP_SUCCESS, &vif->sme_state);
++ clear_bit(BRCMF_VIF_STATUS_ASSOC_SUCCESS, &vif->sme_state);
+ return true;
+ }
+ return false;
+@@ -6067,7 +6070,7 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
+ } else
+ brcmf_bss_connect_done(cfg, ndev, e, true);
+ brcmf_net_setcarrier(ifp, true);
+- } else if (brcmf_is_linkdown(e)) {
++ } else if (brcmf_is_linkdown(ifp->vif, e)) {
+ brcmf_dbg(CONN, "Linkdown\n");
+ if (!brcmf_is_ibssmode(ifp->vif) &&
+ test_bit(BRCMF_VIF_STATUS_CONNECTED,
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+index ab93a848a4667..e71bc97cb40e7 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -1972,7 +1972,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
+ int ret;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+- spin_lock_irqsave(&trans_pcie->reg_lock, *flags);
++ spin_lock_bh(&trans_pcie->reg_lock);
+
+ if (trans_pcie->cmd_hold_nic_awake)
+ goto out;
+@@ -2057,7 +2057,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
+ }
+
+ err:
+- spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
++ spin_unlock_bh(&trans_pcie->reg_lock);
+ return false;
+ }
+
+@@ -2095,7 +2095,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
+ * scheduled on different CPUs (after we drop reg_lock).
+ */
+ out:
+- spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
++ spin_unlock_bh(&trans_pcie->reg_lock);
+ }
+
+ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
+@@ -2296,11 +2296,10 @@ static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
+ u32 mask, u32 value)
+ {
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+- unsigned long flags;
+
+- spin_lock_irqsave(&trans_pcie->reg_lock, flags);
++ spin_lock_bh(&trans_pcie->reg_lock);
+ __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
+- spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
++ spin_unlock_bh(&trans_pcie->reg_lock);
+ }
+
+ static const char *get_csr_string(int cmd)
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+index 8757246a90d53..b9afd9b04042a 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+@@ -31,7 +31,6 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
+ struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
+ struct iwl_device_cmd *out_cmd;
+ struct iwl_cmd_meta *out_meta;
+- unsigned long flags;
+ void *dup_buf = NULL;
+ dma_addr_t phys_addr;
+ int i, cmd_pos, idx;
+@@ -244,11 +243,11 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
+ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
+ mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+
+- spin_lock_irqsave(&trans_pcie->reg_lock, flags);
++ spin_lock(&trans_pcie->reg_lock);
+ /* Increment and update queue's write index */
+ txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
+ iwl_txq_inc_wr_ptr(trans, txq);
+- spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
++ spin_unlock(&trans_pcie->reg_lock);
+
+ out:
+ spin_unlock_bh(&txq->lock);
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+index 83f4964f3cb29..689f51968049a 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+@@ -223,12 +223,10 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
+ txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
+
+ if (txq->read_ptr == txq->write_ptr) {
+- unsigned long flags;
+-
+- spin_lock_irqsave(&trans_pcie->reg_lock, flags);
++ spin_lock(&trans_pcie->reg_lock);
+ if (txq_id == trans->txqs.cmd.q_id)
+ iwl_pcie_clear_cmd_in_flight(trans);
+- spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
++ spin_unlock(&trans_pcie->reg_lock);
+ }
+ }
+
+@@ -679,7 +677,6 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
+ {
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
+- unsigned long flags;
+ int nfreed = 0;
+ u16 r;
+
+@@ -710,9 +707,10 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
+ }
+
+ if (txq->read_ptr == txq->write_ptr) {
+- spin_lock_irqsave(&trans_pcie->reg_lock, flags);
++ /* BHs are also disabled due to txq->lock */
++ spin_lock(&trans_pcie->reg_lock);
+ iwl_pcie_clear_cmd_in_flight(trans);
+- spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
++ spin_unlock(&trans_pcie->reg_lock);
+ }
+
+ iwl_txq_progress(txq);
+@@ -921,7 +919,6 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
+ struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
+ struct iwl_device_cmd *out_cmd;
+ struct iwl_cmd_meta *out_meta;
+- unsigned long flags;
+ void *dup_buf = NULL;
+ dma_addr_t phys_addr;
+ int idx;
+@@ -1164,20 +1161,19 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
+ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
+ mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+
+- spin_lock_irqsave(&trans_pcie->reg_lock, flags);
++ spin_lock(&trans_pcie->reg_lock);
+ ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
+ if (ret < 0) {
+ idx = ret;
+- spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+- goto out;
++ goto unlock_reg;
+ }
+
+ /* Increment and update queue's write index */
+ txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
+ iwl_pcie_txq_inc_wr_ptr(trans, txq);
+
+- spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+-
++ unlock_reg:
++ spin_unlock(&trans_pcie->reg_lock);
+ out:
+ spin_unlock_bh(&txq->lock);
+ free_dup_buf:
+diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+index fbfd85439d1ff..88fb49486ee09 100644
+--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
++++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+@@ -719,8 +719,8 @@ static void rtw8821c_coex_cfg_ant_switch(struct rtw_dev *rtwdev, u8 ctrl_type,
+ regval = (!polarity_inverse ? 0x1 : 0x2);
+ }
+
+- rtw_write8_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_R_RFE_SEL_15,
+- regval);
++ rtw_write32_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_R_RFE_SEL_15,
++ regval);
+ break;
+ case COEX_SWITCH_CTRL_BY_PTA:
+ rtw_write32_clr(rtwdev, REG_LED_CFG, BIT_DPDT_SEL_EN);
+@@ -730,8 +730,8 @@ static void rtw8821c_coex_cfg_ant_switch(struct rtw_dev *rtwdev, u8 ctrl_type,
+ PTA_CTRL_PIN);
+
+ regval = (!polarity_inverse ? 0x2 : 0x1);
+- rtw_write8_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_R_RFE_SEL_15,
+- regval);
++ rtw_write32_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_R_RFE_SEL_15,
++ regval);
+ break;
+ case COEX_SWITCH_CTRL_BY_ANTDIV:
+ rtw_write32_clr(rtwdev, REG_LED_CFG, BIT_DPDT_SEL_EN);
+@@ -757,11 +757,11 @@ static void rtw8821c_coex_cfg_ant_switch(struct rtw_dev *rtwdev, u8 ctrl_type,
+ }
+
+ if (ctrl_type == COEX_SWITCH_CTRL_BY_BT) {
+- rtw_write32_clr(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE1);
+- rtw_write32_clr(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE2);
++ rtw_write8_clr(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE1);
++ rtw_write8_clr(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE2);
+ } else {
+- rtw_write32_set(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE1);
+- rtw_write32_set(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE2);
++ rtw_write8_set(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE1);
++ rtw_write8_set(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE2);
+ }
+ }
+
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index 8b0485ada315b..d658c6e8263af 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -1098,11 +1098,11 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
+ cmd->rbytes_done += ret;
+ }
+
++ nvmet_tcp_unmap_pdu_iovec(cmd);
+ if (queue->data_digest) {
+ nvmet_tcp_prep_recv_ddgst(cmd);
+ return 0;
+ }
+- nvmet_tcp_unmap_pdu_iovec(cmd);
+
+ if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
+ cmd->rbytes_done == cmd->req.transfer_len) {
+diff --git a/drivers/pinctrl/pinctrl-microchip-sgpio.c b/drivers/pinctrl/pinctrl-microchip-sgpio.c
+index f35edb0eac405..c12fa57ebd12c 100644
+--- a/drivers/pinctrl/pinctrl-microchip-sgpio.c
++++ b/drivers/pinctrl/pinctrl-microchip-sgpio.c
+@@ -572,7 +572,7 @@ static void microchip_sgpio_irq_settype(struct irq_data *data,
+ /* Type value spread over 2 registers sets: low, high bit */
+ sgpio_clrsetbits(bank->priv, REG_INT_TRIGGER, addr.bit,
+ BIT(addr.port), (!!(type & 0x1)) << addr.port);
+- sgpio_clrsetbits(bank->priv, REG_INT_TRIGGER + SGPIO_MAX_BITS, addr.bit,
++ sgpio_clrsetbits(bank->priv, REG_INT_TRIGGER, SGPIO_MAX_BITS + addr.bit,
+ BIT(addr.port), (!!(type & 0x2)) << addr.port);
+
+ if (type == SGPIO_INT_TRG_LEVEL)
+diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
+index aa1a1c850d057..53a0badc6b035 100644
+--- a/drivers/pinctrl/pinctrl-rockchip.c
++++ b/drivers/pinctrl/pinctrl-rockchip.c
+@@ -3727,12 +3727,15 @@ static int __maybe_unused rockchip_pinctrl_suspend(struct device *dev)
+ static int __maybe_unused rockchip_pinctrl_resume(struct device *dev)
+ {
+ struct rockchip_pinctrl *info = dev_get_drvdata(dev);
+- int ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX,
+- rk3288_grf_gpio6c_iomux |
+- GPIO6C6_SEL_WRITE_ENABLE);
++ int ret;
+
+- if (ret)
+- return ret;
++ if (info->ctrl->type == RK3288) {
++ ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX,
++ rk3288_grf_gpio6c_iomux |
++ GPIO6C6_SEL_WRITE_ENABLE);
++ if (ret)
++ return ret;
++ }
+
+ return pinctrl_force_default(info->pctl_dev);
+ }
+diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+index 369ee20a7ea95..2f19ab4db7208 100644
+--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
++++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+@@ -392,7 +392,7 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
+ unsigned long *configs, unsigned int nconfs)
+ {
+ struct lpi_pinctrl *pctrl = dev_get_drvdata(pctldev->dev);
+- unsigned int param, arg, pullup, strength;
++ unsigned int param, arg, pullup = LPI_GPIO_BIAS_DISABLE, strength = 2;
+ bool value, output_enabled = false;
+ const struct lpi_pingroup *g;
+ unsigned long sval;
+diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280.c b/drivers/pinctrl/qcom/pinctrl-sc7280.c
+index 8daccd5302854..9d41abfca37ea 100644
+--- a/drivers/pinctrl/qcom/pinctrl-sc7280.c
++++ b/drivers/pinctrl/qcom/pinctrl-sc7280.c
+@@ -1439,14 +1439,14 @@ static const struct msm_pingroup sc7280_groups[] = {
+ [172] = PINGROUP(172, qdss, _, _, _, _, _, _, _, _),
+ [173] = PINGROUP(173, qdss, _, _, _, _, _, _, _, _),
+ [174] = PINGROUP(174, qdss, _, _, _, _, _, _, _, _),
+- [175] = UFS_RESET(ufs_reset, 0x1be000),
+- [176] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x1b3000, 15, 0),
+- [177] = SDC_QDSD_PINGROUP(sdc1_clk, 0x1b3000, 13, 6),
+- [178] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x1b3000, 11, 3),
+- [179] = SDC_QDSD_PINGROUP(sdc1_data, 0x1b3000, 9, 0),
+- [180] = SDC_QDSD_PINGROUP(sdc2_clk, 0x1b4000, 14, 6),
+- [181] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x1b4000, 11, 3),
+- [182] = SDC_QDSD_PINGROUP(sdc2_data, 0x1b4000, 9, 0),
++ [175] = UFS_RESET(ufs_reset, 0xbe000),
++ [176] = SDC_QDSD_PINGROUP(sdc1_rclk, 0xb3004, 0, 6),
++ [177] = SDC_QDSD_PINGROUP(sdc1_clk, 0xb3000, 13, 6),
++ [178] = SDC_QDSD_PINGROUP(sdc1_cmd, 0xb3000, 11, 3),
++ [179] = SDC_QDSD_PINGROUP(sdc1_data, 0xb3000, 9, 0),
++ [180] = SDC_QDSD_PINGROUP(sdc2_clk, 0xb4000, 14, 6),
++ [181] = SDC_QDSD_PINGROUP(sdc2_cmd, 0xb4000, 11, 3),
++ [182] = SDC_QDSD_PINGROUP(sdc2_data, 0xb4000, 9, 0),
+ };
+
+ static const struct msm_pinctrl_soc_data sc7280_pinctrl = {
+diff --git a/drivers/pinctrl/qcom/pinctrl-sdx55.c b/drivers/pinctrl/qcom/pinctrl-sdx55.c
+index 2b5b0e2b03add..5aaf57b40407f 100644
+--- a/drivers/pinctrl/qcom/pinctrl-sdx55.c
++++ b/drivers/pinctrl/qcom/pinctrl-sdx55.c
+@@ -423,7 +423,7 @@ static const char * const gpio_groups[] = {
+
+ static const char * const qdss_stm_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", "gpio12", "gpio13",
+- "gpio14", "gpio15", "gpio16", "gpio17", "gpio18", "gpio19" "gpio20", "gpio21", "gpio22",
++ "gpio14", "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22",
+ "gpio23", "gpio44", "gpio45", "gpio52", "gpio53", "gpio56", "gpio57", "gpio61", "gpio62",
+ "gpio63", "gpio64", "gpio65", "gpio66",
+ };
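The sdx55 fix above adds a missing comma. In C, adjacent string literals are concatenated, so the buggy array silently had one fewer entry and a bogus "gpio19gpio20" group rather than failing to compile. A runnable demonstration:

#include <stdio.h>

int main(void)
{
	static const char * const buggy[] = {
		"gpio18", "gpio19" "gpio20", "gpio21",	/* missing comma */
	};
	static const char * const fixed[] = {
		"gpio18", "gpio19", "gpio20", "gpio21",
	};

	printf("buggy: %zu entries, [1]=\"%s\"\n",
	       sizeof(buggy) / sizeof(buggy[0]), buggy[1]);
	printf("fixed: %zu entries, [1]=\"%s\"\n",
	       sizeof(fixed) / sizeof(fixed[0]), fixed[1]);
	return 0;
}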
+diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
+index 10e5e6c8087dc..01620f3eab39f 100644
+--- a/drivers/scsi/qla2xxx/qla_target.h
++++ b/drivers/scsi/qla2xxx/qla_target.h
+@@ -116,7 +116,6 @@
+ (min(1270, ((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD_24XX + \
+ QLA_TGT_DATASEGS_PER_CONT_24XX*((ql) - 1)) : 0))
+ #endif
+-#endif
+
+ #define GET_TARGET_ID(ha, iocb) ((HAS_EXTENDED_IDS(ha)) \
+ ? le16_to_cpu((iocb)->u.isp2x.target.extended) \
+@@ -244,6 +243,7 @@ struct ctio_to_2xxx {
+ #ifndef CTIO_RET_TYPE
+ #define CTIO_RET_TYPE 0x17 /* CTIO return entry */
+ #define ATIO_TYPE7 0x06 /* Accept target I/O entry for 24xx */
++#endif
+
+ struct fcp_hdr {
+ uint8_t r_ctl;
+diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
+index 43f7624508a96..8b10fa4e381a4 100644
+--- a/drivers/scsi/st.c
++++ b/drivers/scsi/st.c
+@@ -1269,8 +1269,8 @@ static int st_open(struct inode *inode, struct file *filp)
+ spin_lock(&st_use_lock);
+ if (STp->in_use) {
+ spin_unlock(&st_use_lock);
+- scsi_tape_put(STp);
+ DEBC_printk(STp, "Device already in use.\n");
++ scsi_tape_put(STp);
+ return (-EBUSY);
+ }
+
+diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
+index f42954e2c98e4..1fd29f93ff6d6 100644
+--- a/drivers/soc/qcom/qcom-geni-se.c
++++ b/drivers/soc/qcom/qcom-geni-se.c
+@@ -3,7 +3,6 @@
+
+ #include <linux/acpi.h>
+ #include <linux/clk.h>
+-#include <linux/console.h>
+ #include <linux/slab.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/io.h>
+@@ -92,14 +91,11 @@ struct geni_wrapper {
+ struct device *dev;
+ void __iomem *base;
+ struct clk_bulk_data ahb_clks[NUM_AHB_CLKS];
+- struct geni_icc_path to_core;
+ };
+
+ static const char * const icc_path_names[] = {"qup-core", "qup-config",
+ "qup-memory"};
+
+-static struct geni_wrapper *earlycon_wrapper;
+-
+ #define QUP_HW_VER_REG 0x4
+
+ /* Common SE registers */
+@@ -843,44 +839,11 @@ int geni_icc_disable(struct geni_se *se)
+ }
+ EXPORT_SYMBOL(geni_icc_disable);
+
+-void geni_remove_earlycon_icc_vote(void)
+-{
+- struct platform_device *pdev;
+- struct geni_wrapper *wrapper;
+- struct device_node *parent;
+- struct device_node *child;
+-
+- if (!earlycon_wrapper)
+- return;
+-
+- wrapper = earlycon_wrapper;
+- parent = of_get_next_parent(wrapper->dev->of_node);
+- for_each_child_of_node(parent, child) {
+- if (!of_device_is_compatible(child, "qcom,geni-se-qup"))
+- continue;
+-
+- pdev = of_find_device_by_node(child);
+- if (!pdev)
+- continue;
+-
+- wrapper = platform_get_drvdata(pdev);
+- icc_put(wrapper->to_core.path);
+- wrapper->to_core.path = NULL;
+-
+- }
+- of_node_put(parent);
+-
+- earlycon_wrapper = NULL;
+-}
+-EXPORT_SYMBOL(geni_remove_earlycon_icc_vote);
+-
+ static int geni_se_probe(struct platform_device *pdev)
+ {
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct geni_wrapper *wrapper;
+- struct console __maybe_unused *bcon;
+- bool __maybe_unused has_earlycon = false;
+ int ret;
+
+ wrapper = devm_kzalloc(dev, sizeof(*wrapper), GFP_KERNEL);
+@@ -903,43 +866,6 @@ static int geni_se_probe(struct platform_device *pdev)
+ }
+ }
+
+-#ifdef CONFIG_SERIAL_EARLYCON
+- for_each_console(bcon) {
+- if (!strcmp(bcon->name, "qcom_geni")) {
+- has_earlycon = true;
+- break;
+- }
+- }
+- if (!has_earlycon)
+- goto exit;
+-
+- wrapper->to_core.path = devm_of_icc_get(dev, "qup-core");
+- if (IS_ERR(wrapper->to_core.path))
+- return PTR_ERR(wrapper->to_core.path);
+- /*
+-	 * Put a minimal BW request on core clocks on behalf of early console.
+-	 * The vote will be removed in the earlycon exit function.
+-	 *
+-	 * Note: We put a vote on each QUP wrapper, not only on the one the
+-	 * earlycon is connected to, because the QUP core clocks of different
+-	 * wrappers share the same voltage domain. If core1 is put to 0, then
+-	 * core2 will also run at 0 unless it has voted. The default ICC vote
+-	 * will be removed as soon as we touch any of the core clocks.
+- * core1 = core2 = max(core1, core2)
+- */
+- ret = icc_set_bw(wrapper->to_core.path, GENI_DEFAULT_BW,
+- GENI_DEFAULT_BW);
+- if (ret) {
+- dev_err(&pdev->dev, "%s: ICC BW voting failed for core: %d\n",
+- __func__, ret);
+- return ret;
+- }
+-
+- if (of_get_compatible_child(pdev->dev.of_node, "qcom,geni-debug-uart"))
+- earlycon_wrapper = wrapper;
+- of_node_put(pdev->dev.of_node);
+-exit:
+-#endif
+ dev_set_drvdata(dev, wrapper);
+ dev_dbg(dev, "GENI SE Driver probed\n");
+ return devm_of_platform_populate(dev);
+diff --git a/drivers/staging/comedi/drivers/cb_pcidas.c b/drivers/staging/comedi/drivers/cb_pcidas.c
+index d740c47827751..2f20bd56ec6ca 100644
+--- a/drivers/staging/comedi/drivers/cb_pcidas.c
++++ b/drivers/staging/comedi/drivers/cb_pcidas.c
+@@ -1281,7 +1281,7 @@ static int cb_pcidas_auto_attach(struct comedi_device *dev,
+ devpriv->amcc + AMCC_OP_REG_INTCSR);
+
+ ret = request_irq(pcidev->irq, cb_pcidas_interrupt, IRQF_SHARED,
+- dev->board_name, dev);
++ "cb_pcidas", dev);
+ if (ret) {
+ dev_dbg(dev->class_dev, "unable to allocate irq %d\n",
+ pcidev->irq);
+diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
+index fa987bb0e7cd4..6d3ba399a7f0b 100644
+--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
++++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
+@@ -4035,7 +4035,7 @@ static int auto_attach(struct comedi_device *dev,
+ init_stc_registers(dev);
+
+ retval = request_irq(pcidev->irq, handle_interrupt, IRQF_SHARED,
+- dev->board_name, dev);
++ "cb_pcidas64", dev);
+ if (retval) {
+ dev_dbg(dev->class_dev, "unable to allocate irq %u\n",
+ pcidev->irq);
+diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
+index b84f00b8d18bc..4cabaf21c1ca0 100644
+--- a/drivers/staging/rtl8192e/rtllib.h
++++ b/drivers/staging/rtl8192e/rtllib.h
+@@ -1105,7 +1105,7 @@ struct rtllib_network {
+ bool bWithAironetIE;
+ bool bCkipSupported;
+ bool bCcxRmEnable;
+- u16 CcxRmState[2];
++ u8 CcxRmState[2];
+ bool bMBssidValid;
+ u8 MBssidMask;
+ u8 MBssid[ETH_ALEN];
+diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
+index d31b5e1c8df47..63752233e551f 100644
+--- a/drivers/staging/rtl8192e/rtllib_rx.c
++++ b/drivers/staging/rtl8192e/rtllib_rx.c
+@@ -1968,7 +1968,7 @@ static void rtllib_parse_mife_generic(struct rtllib_device *ieee,
+ info_element->data[2] == 0x96 &&
+ info_element->data[3] == 0x01) {
+ if (info_element->len == 6) {
+- memcpy(network->CcxRmState, &info_element[4], 2);
++ memcpy(network->CcxRmState, &info_element->data[4], 2);
+ if (network->CcxRmState[0] != 0)
+ network->bCcxRmEnable = true;
+ else
+diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
+index 0866e949339bd..9b73532464e55 100644
+--- a/drivers/thermal/thermal_sysfs.c
++++ b/drivers/thermal/thermal_sysfs.c
+@@ -754,6 +754,9 @@ void thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev,
+ {
+ struct cooling_dev_stats *stats = cdev->stats;
+
++ if (!stats)
++ return;
++
+ spin_lock(&stats->lock);
+
+ if (stats->state == new_state)
+diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
+index 291649f028213..0d85b55ea8233 100644
+--- a/drivers/tty/serial/qcom_geni_serial.c
++++ b/drivers/tty/serial/qcom_geni_serial.c
+@@ -1177,12 +1177,6 @@ static inline void qcom_geni_serial_enable_early_read(struct geni_se *se,
+ struct console *con) { }
+ #endif
+
+-static int qcom_geni_serial_earlycon_exit(struct console *con)
+-{
+- geni_remove_earlycon_icc_vote();
+- return 0;
+-}
+-
+ static struct qcom_geni_private_data earlycon_private_data;
+
+ static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev,
+@@ -1233,7 +1227,6 @@ static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev,
+ writel(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
+
+ dev->con->write = qcom_geni_serial_earlycon_write;
+- dev->con->exit = qcom_geni_serial_earlycon_exit;
+ dev->con->setup = NULL;
+ qcom_geni_serial_enable_early_read(&se, dev->con);
+
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 2f4e5174e78c8..e79359326411a 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -147,17 +147,29 @@ static inline int acm_set_control(struct acm *acm, int control)
+ #define acm_send_break(acm, ms) \
+ acm_ctrl_msg(acm, USB_CDC_REQ_SEND_BREAK, ms, NULL, 0)
+
+-static void acm_kill_urbs(struct acm *acm)
++static void acm_poison_urbs(struct acm *acm)
+ {
+ int i;
+
+- usb_kill_urb(acm->ctrlurb);
++ usb_poison_urb(acm->ctrlurb);
+ for (i = 0; i < ACM_NW; i++)
+- usb_kill_urb(acm->wb[i].urb);
++ usb_poison_urb(acm->wb[i].urb);
+ for (i = 0; i < acm->rx_buflimit; i++)
+- usb_kill_urb(acm->read_urbs[i]);
++ usb_poison_urb(acm->read_urbs[i]);
+ }
+
++static void acm_unpoison_urbs(struct acm *acm)
++{
++ int i;
++
++ for (i = 0; i < acm->rx_buflimit; i++)
++ usb_unpoison_urb(acm->read_urbs[i]);
++ for (i = 0; i < ACM_NW; i++)
++ usb_unpoison_urb(acm->wb[i].urb);
++ usb_unpoison_urb(acm->ctrlurb);
++}
++
+ /*
+ * Write buffer management.
+ * All of these assume proper locks taken by the caller.
+@@ -226,9 +238,10 @@ static int acm_start_wb(struct acm *acm, struct acm_wb *wb)
+
+ rc = usb_submit_urb(wb->urb, GFP_ATOMIC);
+ if (rc < 0) {
+- dev_err(&acm->data->dev,
+- "%s - usb_submit_urb(write bulk) failed: %d\n",
+- __func__, rc);
++ if (rc != -EPERM)
++ dev_err(&acm->data->dev,
++ "%s - usb_submit_urb(write bulk) failed: %d\n",
++ __func__, rc);
+ acm_write_done(acm, wb);
+ }
+ return rc;
+@@ -313,8 +326,10 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
+ acm->iocount.dsr++;
+ if (difference & ACM_CTRL_DCD)
+ acm->iocount.dcd++;
+- if (newctrl & ACM_CTRL_BRK)
++ if (newctrl & ACM_CTRL_BRK) {
+ acm->iocount.brk++;
++ tty_insert_flip_char(&acm->port, 0, TTY_BREAK);
++ }
+ if (newctrl & ACM_CTRL_RI)
+ acm->iocount.rng++;
+ if (newctrl & ACM_CTRL_FRAMING)
+@@ -480,11 +495,6 @@ static void acm_read_bulk_callback(struct urb *urb)
+ dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n",
+ rb->index, urb->actual_length, status);
+
+- if (!acm->dev) {
+- dev_dbg(&acm->data->dev, "%s - disconnected\n", __func__);
+- return;
+- }
+-
+ switch (status) {
+ case 0:
+ usb_mark_last_busy(acm->dev);
+@@ -649,7 +659,8 @@ static void acm_port_dtr_rts(struct tty_port *port, int raise)
+
+ res = acm_set_control(acm, val);
+ if (res && (acm->ctrl_caps & USB_CDC_CAP_LINE))
+- dev_err(&acm->control->dev, "failed to set dtr/rts\n");
++ /* This is broken in too many devices to spam the logs */
++ dev_dbg(&acm->control->dev, "failed to set dtr/rts\n");
+ }
+
+ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
+@@ -731,6 +742,7 @@ static void acm_port_shutdown(struct tty_port *port)
+ * Need to grab write_lock to prevent race with resume, but no need to
+ * hold it due to the tty-port initialised flag.
+ */
++ acm_poison_urbs(acm);
+ spin_lock_irq(&acm->write_lock);
+ spin_unlock_irq(&acm->write_lock);
+
+@@ -747,7 +759,8 @@ static void acm_port_shutdown(struct tty_port *port)
+ usb_autopm_put_interface_async(acm->control);
+ }
+
+- acm_kill_urbs(acm);
++ acm_unpoison_urbs(acm);
+ }
+
+ static void acm_tty_cleanup(struct tty_struct *tty)
+@@ -1503,12 +1516,16 @@ skip_countries:
+
+ return 0;
+ alloc_fail6:
++ if (!acm->combined_interfaces) {
++ /* Clear driver data so that disconnect() returns early. */
++ usb_set_intfdata(data_interface, NULL);
++ usb_driver_release_interface(&acm_driver, data_interface);
++ }
+ if (acm->country_codes) {
+ device_remove_file(&acm->control->dev,
+ &dev_attr_wCountryCodes);
+ device_remove_file(&acm->control->dev,
+ &dev_attr_iCountryCodeRelDate);
+- kfree(acm->country_codes);
+ }
+ device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
+ alloc_fail5:
+@@ -1540,8 +1557,14 @@ static void acm_disconnect(struct usb_interface *intf)
+ if (!acm)
+ return;
+
+- mutex_lock(&acm->mutex);
+ acm->disconnected = true;
++ /*
++ * there is a circular dependency: acm_softint() can resubmit
++ * the URBs from its error handling, so we need to block any
++ * submission right away
++ */
++ acm_poison_urbs(acm);
++ mutex_lock(&acm->mutex);
+ if (acm->country_codes) {
+ device_remove_file(&acm->control->dev,
+ &dev_attr_wCountryCodes);
+@@ -1560,7 +1583,6 @@ static void acm_disconnect(struct usb_interface *intf)
+ tty_kref_put(tty);
+ }
+
+- acm_kill_urbs(acm);
+ cancel_delayed_work_sync(&acm->dwork);
+
+ tty_unregister_device(acm_tty_driver, acm->minor);
+@@ -1602,7 +1624,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
+ if (cnt)
+ return 0;
+
+- acm_kill_urbs(acm);
++ acm_poison_urbs(acm);
+ cancel_delayed_work_sync(&acm->dwork);
+ acm->urbs_in_error_delay = 0;
+
+@@ -1615,6 +1637,7 @@ static int acm_resume(struct usb_interface *intf)
+ struct urb *urb;
+ int rv = 0;
+
++ acm_unpoison_urbs(acm);
+ spin_lock_irq(&acm->write_lock);
+
+ if (--acm->susp_count)
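
The cdc-acm hunks above swap usb_kill_urb() for usb_poison_urb()/usb_unpoison_urb(): a poisoned URB fails any resubmission attempt (with -EPERM, which is why the write-path dev_err is now suppressed for that code) until it is explicitly unpoisoned, closing the window in which a completion handler could requeue I/O during shutdown, suspend or disconnect. A minimal userspace sketch of the same idea, with illustrative names (req_poison, req_submit) that are not kernel API:

    /* A "poisoned" flag makes submission fail until unpoisoned, so a
     * completion callback cannot requeue work behind a teardown path.
     */
    #include <stdatomic.h>
    #include <stdio.h>

    struct request {
        atomic_int poisoned;          /* nonzero: reject all submissions */
    };

    static int req_submit(struct request *r)
    {
        if (atomic_load(&r->poisoned))
            return -1;                /* like usb_submit_urb() -> -EPERM */
        /* ... start the I/O ... */
        return 0;
    }

    static void req_poison(struct request *r)   { atomic_store(&r->poisoned, 1); }
    static void req_unpoison(struct request *r) { atomic_store(&r->poisoned, 0); }

    int main(void)
    {
        struct request r = { 0 };

        req_poison(&r);
        printf("submit while poisoned: %d\n", req_submit(&r));  /* -1 */
        req_unpoison(&r);
        printf("submit after unpoison: %d\n", req_submit(&r));  /*  0 */
        return 0;
    }
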
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 6ade3daf78584..76ac5d6555ae4 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -498,6 +498,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ /* DJI CineSSD */
+ { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
+
++ /* Fibocom L850-GL LTE Modem */
++ { USB_DEVICE(0x2cb7, 0x0007), .driver_info =
++ USB_QUIRK_IGNORE_REMOTE_WAKEUP },
++
+ /* INTEL VALUE SSD */
+ { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
+
+diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
+index fc3269f5faf19..1a9789ec5847f 100644
+--- a/drivers/usb/dwc2/hcd.c
++++ b/drivers/usb/dwc2/hcd.c
+@@ -4322,7 +4322,8 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
+ if (hsotg->op_state == OTG_STATE_B_PERIPHERAL)
+ goto unlock;
+
+- if (hsotg->params.power_down > DWC2_POWER_DOWN_PARAM_PARTIAL)
++ if (hsotg->params.power_down != DWC2_POWER_DOWN_PARAM_PARTIAL ||
++ hsotg->flags.b.port_connect_status == 0)
+ goto skip_power_saving;
+
+ /*
+@@ -5398,7 +5399,7 @@ int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg)
+ dwc2_writel(hsotg, hprt0, HPRT0);
+
+ /* Wait for the HPRT0.PrtSusp register field to be set */
+- if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 3000))
++ if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 5000))
+ dev_warn(hsotg->dev, "Suspend wasn't generated\n");
+
+ /*
+diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
+index bae6a70664c80..598daed8086f6 100644
+--- a/drivers/usb/dwc3/dwc3-pci.c
++++ b/drivers/usb/dwc3/dwc3-pci.c
+@@ -118,6 +118,8 @@ static const struct property_entry dwc3_pci_intel_properties[] = {
+ static const struct property_entry dwc3_pci_mrfld_properties[] = {
+ PROPERTY_ENTRY_STRING("dr_mode", "otg"),
+ PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
++ PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
++ PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
+ PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
+ {}
+ };
+diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
+index c00c4fa139b88..8bd077fb1190f 100644
+--- a/drivers/usb/dwc3/dwc3-qcom.c
++++ b/drivers/usb/dwc3/dwc3-qcom.c
+@@ -244,6 +244,9 @@ static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom)
+ struct device *dev = qcom->dev;
+ int ret;
+
++ if (has_acpi_companion(dev))
++ return 0;
++
+ qcom->icc_path_ddr = of_icc_get(dev, "usb-ddr");
+ if (IS_ERR(qcom->icc_path_ddr)) {
+ dev_err(dev, "failed to get usb-ddr path: %ld\n",
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 2a86ad4b12b34..65ff41e3a18eb 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -791,10 +791,6 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
+ reg &= ~DWC3_DALEPENA_EP(dep->number);
+ dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
+
+- dep->stream_capable = false;
+- dep->type = 0;
+- dep->flags = 0;
+-
+ /* Clear out the ep descriptors for non-ep0 */
+ if (dep->number > 1) {
+ dep->endpoint.comp_desc = NULL;
+@@ -803,6 +799,10 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
+
+ dwc3_remove_requests(dwc, dep);
+
++ dep->stream_capable = false;
++ dep->type = 0;
++ dep->flags = 0;
++
+ return 0;
+ }
+
+diff --git a/drivers/usb/gadget/udc/amd5536udc_pci.c b/drivers/usb/gadget/udc/amd5536udc_pci.c
+index 8d387e0e4d91f..c80f9bd51b750 100644
+--- a/drivers/usb/gadget/udc/amd5536udc_pci.c
++++ b/drivers/usb/gadget/udc/amd5536udc_pci.c
+@@ -153,6 +153,11 @@ static int udc_pci_probe(
+ pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
+
++ dev->phys_addr = resource;
++ dev->irq = pdev->irq;
++ dev->pdev = pdev;
++ dev->dev = &pdev->dev;
++
+ /* init dma pools */
+ if (use_dma) {
+ retval = init_dma_pools(dev);
+@@ -160,11 +165,6 @@ static int udc_pci_probe(
+ goto err_dma;
+ }
+
+- dev->phys_addr = resource;
+- dev->irq = pdev->irq;
+- dev->pdev = pdev;
+- dev->dev = &pdev->dev;
+-
+ /* general probing */
+ if (udc_probe(dev)) {
+ retval = -ENODEV;
+diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
+index fe010cc61f19b..2f27dc0d9c6bd 100644
+--- a/drivers/usb/host/xhci-mtk.c
++++ b/drivers/usb/host/xhci-mtk.c
+@@ -397,6 +397,13 @@ static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci)
+ xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
+ if (mtk->lpm_support)
+ xhci->quirks |= XHCI_LPM_SUPPORT;
++
++ /*
+ * MTK xHCI 0.96: PSA is 1 by default even when streams are not
+ * supported, and is 3 when they are.
++ */
++ if (xhci->hci_version < 0x100 && HCC_MAX_PSA(xhci->hcc_params) == 4)
++ xhci->quirks |= XHCI_BROKEN_STREAMS;
+ }
+
+ /* called during probe() after chip reset completes */
+@@ -548,7 +555,8 @@ static int xhci_mtk_probe(struct platform_device *pdev)
+ if (ret)
+ goto put_usb3_hcd;
+
+- if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
++ if (HCC_MAX_PSA(xhci->hcc_params) >= 4 &&
++ !(xhci->quirks & XHCI_BROKEN_STREAMS))
+ xhci->shared_hcd->can_do_streams = 1;
+
+ ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index 1cd87729ba604..fc0457db62e1a 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -2004,10 +2004,14 @@ static void musb_pm_runtime_check_session(struct musb *musb)
+ MUSB_DEVCTL_HR;
+ switch (devctl & ~s) {
+ case MUSB_QUIRK_B_DISCONNECT_99:
+- musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n");
+- schedule_delayed_work(&musb->irq_work,
+- msecs_to_jiffies(1000));
+- break;
++ if (musb->quirk_retries && !musb->flush_irq_work) {
++ musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n");
++ schedule_delayed_work(&musb->irq_work,
++ msecs_to_jiffies(1000));
++ musb->quirk_retries--;
++ break;
++ }
++ fallthrough;
+ case MUSB_QUIRK_B_INVALID_VBUS_91:
+ if (musb->quirk_retries && !musb->flush_irq_work) {
+ musb_dbg(musb,
+diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
+index 3209b5ddd30c9..a20a8380ca0c9 100644
+--- a/drivers/usb/usbip/vhci_hcd.c
++++ b/drivers/usb/usbip/vhci_hcd.c
+@@ -594,6 +594,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ pr_err("invalid port number %d\n", wIndex);
+ goto error;
+ }
++ if (wValue >= 32)
++ goto error;
+ if (hcd->speed == HCD_USB3) {
+ if ((vhci_hcd->port_status[rhport] &
+ USB_SS_PORT_STAT_POWER) != 0) {
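
The added "wValue >= 32" test rejects any port feature whose bit index would not fit in the 32-bit port_status word; without it, the later shift by wValue would be undefined behaviour. A hedged sketch of the same guard in isolation:

    /* Validate a bit index before using it as a shift amount: shifting
     * a 32-bit value by 32 or more is undefined behaviour in C.
     */
    #include <stdint.h>
    #include <stdbool.h>

    static bool set_status_bit(uint32_t *status, unsigned int feature)
    {
        if (feature >= 32)            /* mirrors the wValue >= 32 check */
            return false;
        *status |= (uint32_t)1 << feature;
        return true;
    }
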
+diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
+index 40a223381ab61..0f28bf99efebc 100644
+--- a/drivers/vfio/pci/Kconfig
++++ b/drivers/vfio/pci/Kconfig
+@@ -42,7 +42,7 @@ config VFIO_PCI_IGD
+
+ config VFIO_PCI_NVLINK2
+ def_bool y
+- depends on VFIO_PCI && PPC_POWERNV
++ depends on VFIO_PCI && PPC_POWERNV && SPAPR_TCE_IOMMU
+ help
+ VFIO PCI support for P9 Witherspoon machine with NVIDIA V100 GPUs
+
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index a262e12c6dc26..5ccb0705beae1 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -332,8 +332,8 @@ static void vhost_vq_reset(struct vhost_dev *dev,
+ vq->error_ctx = NULL;
+ vq->kick = NULL;
+ vq->log_ctx = NULL;
+- vhost_reset_is_le(vq);
+ vhost_disable_cross_endian(vq);
++ vhost_reset_is_le(vq);
+ vq->busyloop_timeout = 0;
+ vq->umem = NULL;
+ vq->iotlb = NULL;
+diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
+index bf61598bf1c39..35fdec88d38d9 100644
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -1341,6 +1341,9 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
+
+ ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1;
+
++ if (!ops->cursor)
++ return;
++
+ ops->cursor(vc, info, mode, get_color(vc, info, c, 1),
+ get_color(vc, info, c, 0));
+ }
+diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
+index c8b0ae676809b..4dc9077dd2ac0 100644
+--- a/drivers/video/fbdev/hyperv_fb.c
++++ b/drivers/video/fbdev/hyperv_fb.c
+@@ -1031,7 +1031,6 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
+ PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
+ if (!pdev) {
+ pr_err("Unable to find PCI Hyper-V video\n");
+- kfree(info->apertures);
+ return -ENODEV;
+ }
+
+@@ -1129,7 +1128,6 @@ getmem_done:
+ } else {
+ pci_dev_put(pdev);
+ }
+- kfree(info->apertures);
+
+ return 0;
+
+@@ -1141,7 +1139,6 @@ err2:
+ err1:
+ if (!gen2vm)
+ pci_dev_put(pdev);
+- kfree(info->apertures);
+
+ return -ENOMEM;
+ }
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index f45f9feebe593..74a5172c2d838 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -626,27 +626,41 @@ int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
+
+ /**
+ * ext4_should_retry_alloc() - check if a block allocation should be retried
+- * @sb: super block
+- * @retries: number of attemps has been made
++ * @sb: superblock
++ * @retries: number of retry attempts made so far
+ *
+- * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
+- * it is profitable to retry the operation, this function will wait
+- * for the current or committing transaction to complete, and then
+- * return TRUE. We will only retry once.
++ * ext4_should_retry_alloc() is called when ENOSPC is returned while
++ * attempting to allocate blocks. If there's an indication that a pending
++ * journal transaction might free some space and allow another attempt to
++ * succeed, this function will wait for the current or committing transaction
++ * to complete and then return TRUE.
+ */
+ int ext4_should_retry_alloc(struct super_block *sb, int *retries)
+ {
+- if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) ||
+- (*retries)++ > 1 ||
+- !EXT4_SB(sb)->s_journal)
++ struct ext4_sb_info *sbi = EXT4_SB(sb);
++
++ if (!sbi->s_journal)
+ return 0;
+
+- smp_mb();
+- if (EXT4_SB(sb)->s_mb_free_pending == 0)
++ if (++(*retries) > 3) {
++ percpu_counter_inc(&sbi->s_sra_exceeded_retry_limit);
+ return 0;
++ }
+
++ /*
++ * if there's no indication that blocks are about to be freed, it's
++ * possible we just missed a transaction commit that did so
++ */
++ smp_mb();
++ if (sbi->s_mb_free_pending == 0)
++ return ext4_has_free_clusters(sbi, 1, 0);
++
++ /*
++ * it's possible we've just missed a transaction commit here,
++ * so ignore the returned status
++ */
+ jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
+- jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
++ (void) jbd2_journal_force_commit_nested(sbi->s_journal);
+ return 1;
+ }
+
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index e5c81593d972c..9ad539ee41964 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1484,6 +1484,7 @@ struct ext4_sb_info {
+ struct percpu_counter s_freeinodes_counter;
+ struct percpu_counter s_dirs_counter;
+ struct percpu_counter s_dirtyclusters_counter;
++ struct percpu_counter s_sra_exceeded_retry_limit;
+ struct blockgroup_lock *s_blockgroup_lock;
+ struct proc_dir_entry *s_proc;
+ struct kobject s_kobj;
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index ed498538a7499..3b9f7bf4045b0 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1937,13 +1937,13 @@ static int __ext4_journalled_writepage(struct page *page,
+ if (!ret)
+ ret = err;
+
+- if (!ext4_has_inline_data(inode))
+- ext4_walk_page_buffers(NULL, page_bufs, 0, len,
+- NULL, bput_one);
+ ext4_set_inode_state(inode, EXT4_STATE_JDATA);
+ out:
+ unlock_page(page);
+ out_no_pagelock:
++ if (!inline_data && page_bufs)
++ ext4_walk_page_buffers(NULL, page_bufs, 0, len,
++ NULL, bput_one);
+ brelse(inode_bh);
+ return ret;
+ }
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 078f26f4b56e3..9cc9e6c1d582f 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -3785,14 +3785,14 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
+ */
+ retval = -ENOENT;
+ if (!old.bh || le32_to_cpu(old.de->inode) != old.inode->i_ino)
+- goto end_rename;
++ goto release_bh;
+
+ new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,
+ &new.de, &new.inlined);
+ if (IS_ERR(new.bh)) {
+ retval = PTR_ERR(new.bh);
+ new.bh = NULL;
+- goto end_rename;
++ goto release_bh;
+ }
+ if (new.bh) {
+ if (!new.inode) {
+@@ -3809,15 +3809,13 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
+ handle = ext4_journal_start(old.dir, EXT4_HT_DIR, credits);
+ if (IS_ERR(handle)) {
+ retval = PTR_ERR(handle);
+- handle = NULL;
+- goto end_rename;
++ goto release_bh;
+ }
+ } else {
+ whiteout = ext4_whiteout_for_rename(&old, credits, &handle);
+ if (IS_ERR(whiteout)) {
+ retval = PTR_ERR(whiteout);
+- whiteout = NULL;
+- goto end_rename;
++ goto release_bh;
+ }
+ }
+
+@@ -3954,16 +3952,18 @@ end_rename:
+ ext4_resetent(handle, &old,
+ old.inode->i_ino, old_file_type);
+ drop_nlink(whiteout);
++ ext4_orphan_add(handle, whiteout);
+ }
+ unlock_new_inode(whiteout);
++ ext4_journal_stop(handle);
+ iput(whiteout);
+-
++ } else {
++ ext4_journal_stop(handle);
+ }
++release_bh:
+ brelse(old.dir_bh);
+ brelse(old.bh);
+ brelse(new.bh);
+- if (handle)
+- ext4_journal_stop(handle);
+ return retval;
+ }
+
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index a1353b0825ea3..c8cc8175b376b 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1210,6 +1210,7 @@ static void ext4_put_super(struct super_block *sb)
+ percpu_counter_destroy(&sbi->s_freeinodes_counter);
+ percpu_counter_destroy(&sbi->s_dirs_counter);
+ percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
++ percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);
+ percpu_free_rwsem(&sbi->s_writepages_rwsem);
+ #ifdef CONFIG_QUOTA
+ for (i = 0; i < EXT4_MAXQUOTAS; i++)
+@@ -5011,6 +5012,9 @@ no_journal:
+ if (!err)
+ err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
+ GFP_KERNEL);
++ if (!err)
++ err = percpu_counter_init(&sbi->s_sra_exceeded_retry_limit, 0,
++ GFP_KERNEL);
+ if (!err)
+ err = percpu_init_rwsem(&sbi->s_writepages_rwsem);
+
+@@ -5124,6 +5128,7 @@ failed_mount6:
+ percpu_counter_destroy(&sbi->s_freeinodes_counter);
+ percpu_counter_destroy(&sbi->s_dirs_counter);
+ percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
++ percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);
+ percpu_free_rwsem(&sbi->s_writepages_rwsem);
+ failed_mount5:
+ ext4_ext_release(sb);
+diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
+index 075aa3a19ff5f..a3d08276d441e 100644
+--- a/fs/ext4/sysfs.c
++++ b/fs/ext4/sysfs.c
+@@ -24,6 +24,7 @@ typedef enum {
+ attr_session_write_kbytes,
+ attr_lifetime_write_kbytes,
+ attr_reserved_clusters,
++ attr_sra_exceeded_retry_limit,
+ attr_inode_readahead,
+ attr_trigger_test_error,
+ attr_first_error_time,
+@@ -202,6 +203,7 @@ EXT4_ATTR_FUNC(delayed_allocation_blocks, 0444);
+ EXT4_ATTR_FUNC(session_write_kbytes, 0444);
+ EXT4_ATTR_FUNC(lifetime_write_kbytes, 0444);
+ EXT4_ATTR_FUNC(reserved_clusters, 0644);
++EXT4_ATTR_FUNC(sra_exceeded_retry_limit, 0444);
+
+ EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, inode_readahead,
+ ext4_sb_info, s_inode_readahead_blks);
+@@ -251,6 +253,7 @@ static struct attribute *ext4_attrs[] = {
+ ATTR_LIST(session_write_kbytes),
+ ATTR_LIST(lifetime_write_kbytes),
+ ATTR_LIST(reserved_clusters),
++ ATTR_LIST(sra_exceeded_retry_limit),
+ ATTR_LIST(inode_readahead_blks),
+ ATTR_LIST(inode_goal),
+ ATTR_LIST(mb_stats),
+@@ -374,6 +377,10 @@ static ssize_t ext4_attr_show(struct kobject *kobj,
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ (unsigned long long)
+ atomic64_read(&sbi->s_resv_clusters));
++ case attr_sra_exceeded_retry_limit:
++ return snprintf(buf, PAGE_SIZE, "%llu\n",
++ (unsigned long long)
++ percpu_counter_sum(&sbi->s_sra_exceeded_retry_limit));
+ case attr_inode_readahead:
+ case attr_pointer_ui:
+ if (!ptr)
+diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
+index 8868ac31a3c0a..4ee6f734ba838 100644
+--- a/fs/fuse/virtio_fs.c
++++ b/fs/fuse/virtio_fs.c
+@@ -1324,8 +1324,15 @@ static int virtio_fs_fill_super(struct super_block *sb, struct fs_context *fsc)
+
+ /* virtiofs allocates and installs its own fuse devices */
+ ctx->fudptr = NULL;
+- if (ctx->dax)
++ if (ctx->dax) {
++ if (!fs->dax_dev) {
++ err = -EINVAL;
++ pr_err("virtio-fs: dax can't be enabled as filesystem"
++ " device does not support it.\n");
++ goto err_free_fuse_devs;
++ }
+ ctx->dax_dev = fs->dax_dev;
++ }
+ err = fuse_fill_super_common(sb, ctx);
+ if (err < 0)
+ goto err_free_fuse_devs;
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 5c4378694d541..8b4213de9e085 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -4628,6 +4628,7 @@ static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
+ struct io_async_msghdr iomsg, *kmsg;
+ struct socket *sock;
+ unsigned flags;
++ int min_ret = 0;
+ int ret;
+
+ sock = sock_from_file(req->file);
+@@ -4648,12 +4649,15 @@ static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
+ kmsg = &iomsg;
+ }
+
+- flags = req->sr_msg.msg_flags;
++ flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
+ if (flags & MSG_DONTWAIT)
+ req->flags |= REQ_F_NOWAIT;
+ else if (force_nonblock)
+ flags |= MSG_DONTWAIT;
+
++ if (flags & MSG_WAITALL)
++ min_ret = iov_iter_count(&kmsg->msg.msg_iter);
++
+ ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
+ if (force_nonblock && ret == -EAGAIN)
+ return io_setup_async_msg(req, kmsg);
+@@ -4663,7 +4667,7 @@ static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
+ if (kmsg->iov != kmsg->fast_iov)
+ kfree(kmsg->iov);
+ req->flags &= ~REQ_F_NEED_CLEANUP;
+- if (ret < 0)
++ if (ret < min_ret)
+ req_set_fail_links(req);
+ __io_req_complete(req, ret, 0, cs);
+ return 0;
+@@ -4677,6 +4681,7 @@ static int io_send(struct io_kiocb *req, bool force_nonblock,
+ struct iovec iov;
+ struct socket *sock;
+ unsigned flags;
++ int min_ret = 0;
+ int ret;
+
+ sock = sock_from_file(req->file);
+@@ -4692,12 +4697,15 @@ static int io_send(struct io_kiocb *req, bool force_nonblock,
+ msg.msg_controllen = 0;
+ msg.msg_namelen = 0;
+
+- flags = req->sr_msg.msg_flags;
++ flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
+ if (flags & MSG_DONTWAIT)
+ req->flags |= REQ_F_NOWAIT;
+ else if (force_nonblock)
+ flags |= MSG_DONTWAIT;
+
++ if (flags & MSG_WAITALL)
++ min_ret = iov_iter_count(&msg.msg_iter);
++
+ msg.msg_flags = flags;
+ ret = sock_sendmsg(sock, &msg);
+ if (force_nonblock && ret == -EAGAIN)
+@@ -4705,7 +4713,7 @@ static int io_send(struct io_kiocb *req, bool force_nonblock,
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+
+- if (ret < 0)
++ if (ret < min_ret)
+ req_set_fail_links(req);
+ __io_req_complete(req, ret, 0, cs);
+ return 0;
+@@ -4857,6 +4865,7 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
+ struct socket *sock;
+ struct io_buffer *kbuf;
+ unsigned flags;
++ int min_ret = 0;
+ int ret, cflags = 0;
+
+ sock = sock_from_file(req->file);
+@@ -4886,12 +4895,15 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
+ 1, req->sr_msg.len);
+ }
+
+- flags = req->sr_msg.msg_flags;
++ flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
+ if (flags & MSG_DONTWAIT)
+ req->flags |= REQ_F_NOWAIT;
+ else if (force_nonblock)
+ flags |= MSG_DONTWAIT;
+
++ if (flags & MSG_WAITALL)
++ min_ret = iov_iter_count(&kmsg->msg.msg_iter);
++
+ ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
+ kmsg->uaddr, flags);
+ if (force_nonblock && ret == -EAGAIN)
+@@ -4904,7 +4916,7 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
+ if (kmsg->iov != kmsg->fast_iov)
+ kfree(kmsg->iov);
+ req->flags &= ~REQ_F_NEED_CLEANUP;
+- if (ret < 0)
++ if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
+ req_set_fail_links(req);
+ __io_req_complete(req, ret, cflags, cs);
+ return 0;
+@@ -4920,6 +4932,7 @@ static int io_recv(struct io_kiocb *req, bool force_nonblock,
+ struct socket *sock;
+ struct iovec iov;
+ unsigned flags;
++ int min_ret = 0;
+ int ret, cflags = 0;
+
+ sock = sock_from_file(req->file);
+@@ -4944,12 +4957,15 @@ static int io_recv(struct io_kiocb *req, bool force_nonblock,
+ msg.msg_iocb = NULL;
+ msg.msg_flags = 0;
+
+- flags = req->sr_msg.msg_flags;
++ flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
+ if (flags & MSG_DONTWAIT)
+ req->flags |= REQ_F_NOWAIT;
+ else if (force_nonblock)
+ flags |= MSG_DONTWAIT;
+
++ if (flags & MSG_WAITALL)
++ min_ret = iov_iter_count(&msg.msg_iter);
++
+ ret = sock_recvmsg(sock, &msg, flags);
+ if (force_nonblock && ret == -EAGAIN)
+ return -EAGAIN;
+@@ -4958,7 +4974,7 @@ static int io_recv(struct io_kiocb *req, bool force_nonblock,
+ out_free:
+ if (req->flags & REQ_F_BUFFER_SELECTED)
+ cflags = io_put_recv_kbuf(req);
+- if (ret < 0)
++ if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
+ req_set_fail_links(req);
+ __io_req_complete(req, ret, cflags, cs);
+ return 0;
+@@ -6496,7 +6512,6 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
+ spin_unlock_irqrestore(&ctx->completion_lock, flags);
+
+ if (prev) {
+- req_set_fail_links(prev);
+ io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
+ io_put_req_deferred(prev, 1);
+ } else {
+@@ -8723,6 +8738,14 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
+ if (!io_sqring_full(ctx))
+ mask |= EPOLLOUT | EPOLLWRNORM;
+
++ /* prevent SQPOLL from submitting new requests */
++ if (ctx->sq_data) {
++ io_sq_thread_park(ctx->sq_data);
++ list_del_init(&ctx->sqd_list);
++ io_sqd_update_thread_idle(ctx->sq_data);
++ io_sq_thread_unpark(ctx->sq_data);
++ }
++
+ /*
+ * Don't flush cqring overflow list here, just do a simple check.
+	 * Otherwise there could possibly be an ABBA deadlock:
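
The io_uring hunks force MSG_NOSIGNAL on every send/recv and, when the caller asked for MSG_WAITALL, treat anything short of the full iovec length (or a truncated receive) as a failed request rather than a success with a short count. The equivalent check in plain socket code, as a hedged sketch (the real code keeps the byte count and only flags the request as failed):

    /* Treat a short transfer as failure when MSG_WAITALL is set,
     * mirroring the min_ret logic added to io_send()/io_sendmsg().
     */
    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <stddef.h>

    static ssize_t send_all_or_fail(int fd, struct msghdr *msg, int flags)
    {
        size_t want = 0, i;
        ssize_t ret;

        flags |= MSG_NOSIGNAL;            /* never raise SIGPIPE */
        if (flags & MSG_WAITALL)          /* the full length is the bar */
            for (i = 0; i < msg->msg_iovlen; i++)
                want += msg->msg_iov[i].iov_len;

        ret = sendmsg(fd, msg, flags);
        if (ret >= 0 && (size_t)ret < want)
            return -1;                    /* short send counts as failure */
        return ret;
    }
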
+diff --git a/fs/iomap/swapfile.c b/fs/iomap/swapfile.c
+index a648dbf6991e4..a5e478de14174 100644
+--- a/fs/iomap/swapfile.c
++++ b/fs/iomap/swapfile.c
+@@ -170,6 +170,16 @@ int iomap_swapfile_activate(struct swap_info_struct *sis,
+ return ret;
+ }
+
++ /*
++ * If this swapfile doesn't contain even a single page-aligned
++ * contiguous range of blocks, reject this useless swapfile to
++ * prevent confusion later on.
++ */
++ if (isi.nr_pages == 0) {
++ pr_warn("swapon: Cannot find a single usable page in file.\n");
++ return -EINVAL;
++ }
++
+ *pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;
+ sis->max = isi.nr_pages;
+ sis->pages = isi.nr_pages - 1;
+diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
+index dbbc583d62730..248f1459c0399 100644
+--- a/fs/nfsd/Kconfig
++++ b/fs/nfsd/Kconfig
+@@ -73,6 +73,7 @@ config NFSD_V4
+ select NFSD_V3
+ select FS_POSIX_ACL
+ select SUNRPC_GSS
++ select CRYPTO
+ select CRYPTO_MD5
+ select CRYPTO_SHA256
+ select GRACE_PERIOD
+diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
+index 052be5bf9ef50..7325592b456e5 100644
+--- a/fs/nfsd/nfs4callback.c
++++ b/fs/nfsd/nfs4callback.c
+@@ -1189,6 +1189,7 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
+ switch (task->tk_status) {
+ case -EIO:
+ case -ETIMEDOUT:
++ case -EACCES:
+ nfsd4_mark_cb_down(clp, task->tk_status);
+ }
+ break;
+diff --git a/fs/reiserfs/xattr.h b/fs/reiserfs/xattr.h
+index c764352447ba1..81bec2c80b25c 100644
+--- a/fs/reiserfs/xattr.h
++++ b/fs/reiserfs/xattr.h
+@@ -43,7 +43,7 @@ void reiserfs_security_free(struct reiserfs_security_handle *sec);
+
+ static inline int reiserfs_xattrs_initialized(struct super_block *sb)
+ {
+- return REISERFS_SB(sb)->priv_root != NULL;
++ return REISERFS_SB(sb)->priv_root && REISERFS_SB(sb)->xattr_root;
+ }
+
+ #define xattr_size(size) ((size) + sizeof(struct reiserfs_xattr_header))
+diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
+index 2564e66e67d74..562cb5e455240 100644
+--- a/include/drm/ttm/ttm_bo_api.h
++++ b/include/drm/ttm/ttm_bo_api.h
+@@ -612,8 +612,10 @@ static inline void ttm_bo_pin(struct ttm_buffer_object *bo)
+ static inline void ttm_bo_unpin(struct ttm_buffer_object *bo)
+ {
+ dma_resv_assert_held(bo->base.resv);
+- WARN_ON_ONCE(!bo->pin_count);
+- --bo->pin_count;
++ if (bo->pin_count)
++ --bo->pin_count;
++ else
++ WARN_ON_ONCE(true);
+ }
+
+ int ttm_mem_evict_first(struct ttm_bo_device *bdev,
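
ttm_bo_unpin() now refuses to decrement past zero and warns instead of underflowing the pin count on an unbalanced unpin. A hedged sketch of that defensive decrement, with WARN_ON_ONCE approximated by a static flag:

    #include <stdio.h>

    static void unpin(unsigned int *pin_count)
    {
        static int warned;

        if (*pin_count) {
            --*pin_count;             /* the normal, balanced unpin */
        } else if (!warned) {
            warned = 1;               /* unbalanced unpin: complain once */
            fprintf(stderr, "unpin without matching pin\n");
        }
    }
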
+diff --git a/include/linux/acpi.h b/include/linux/acpi.h
+index b20568c440013..2f7508c3c2d6a 100644
+--- a/include/linux/acpi.h
++++ b/include/linux/acpi.h
+@@ -222,10 +222,14 @@ void __iomem *__acpi_map_table(unsigned long phys, unsigned long size);
+ void __acpi_unmap_table(void __iomem *map, unsigned long size);
+ int early_acpi_boot_init(void);
+ int acpi_boot_init (void);
++void acpi_boot_table_prepare (void);
+ void acpi_boot_table_init (void);
+ int acpi_mps_check (void);
+ int acpi_numa_init (void);
+
++int acpi_locate_initial_tables (void);
++void acpi_reserve_initial_tables (void);
++void acpi_table_init_complete (void);
+ int acpi_table_init (void);
+ int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
+ int __init acpi_table_parse_entries(char *id, unsigned long table_size,
+@@ -807,9 +811,12 @@ static inline int acpi_boot_init(void)
+ return 0;
+ }
+
++static inline void acpi_boot_table_prepare(void)
++{
++}
++
+ static inline void acpi_boot_table_init(void)
+ {
+- return;
+ }
+
+ static inline int acpi_mps_check(void)
+diff --git a/include/linux/can/can-ml.h b/include/linux/can/can-ml.h
+index 2f5d731ae251d..8afa92d15a664 100644
+--- a/include/linux/can/can-ml.h
++++ b/include/linux/can/can-ml.h
+@@ -44,6 +44,7 @@
+
+ #include <linux/can.h>
+ #include <linux/list.h>
++#include <linux/netdevice.h>
+
+ #define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
+ #define CAN_EFF_RCV_HASH_BITS 10
+@@ -65,4 +66,15 @@ struct can_ml_priv {
+ #endif
+ };
+
++static inline struct can_ml_priv *can_get_ml_priv(struct net_device *dev)
++{
++ return netdev_get_ml_priv(dev, ML_PRIV_CAN);
++}
++
++static inline void can_set_ml_priv(struct net_device *dev,
++ struct can_ml_priv *ml_priv)
++{
++ netdev_set_ml_priv(dev, ml_priv, ML_PRIV_CAN);
++}
++
+ #endif /* CAN_ML_H */
+diff --git a/include/linux/extcon.h b/include/linux/extcon.h
+index fd183fb9c20f7..0c19010da77fa 100644
+--- a/include/linux/extcon.h
++++ b/include/linux/extcon.h
+@@ -271,6 +271,29 @@ static inline void devm_extcon_unregister_notifier(struct device *dev,
+ struct extcon_dev *edev, unsigned int id,
+ struct notifier_block *nb) { }
+
++static inline int extcon_register_notifier_all(struct extcon_dev *edev,
++ struct notifier_block *nb)
++{
++ return 0;
++}
++
++static inline int extcon_unregister_notifier_all(struct extcon_dev *edev,
++ struct notifier_block *nb)
++{
++ return 0;
++}
++
++static inline int devm_extcon_register_notifier_all(struct device *dev,
++ struct extcon_dev *edev,
++ struct notifier_block *nb)
++{
++ return 0;
++}
++
++static inline void devm_extcon_unregister_notifier_all(struct device *dev,
++ struct extcon_dev *edev,
++ struct notifier_block *nb) { }
++
+ static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
+ {
+ return ERR_PTR(-ENODEV);
+diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h
+index a93d85932eb92..f843c6a10cf36 100644
+--- a/include/linux/firmware/intel/stratix10-svc-client.h
++++ b/include/linux/firmware/intel/stratix10-svc-client.h
+@@ -56,7 +56,7 @@
+ * COMMAND_RECONFIG_FLAG_PARTIAL:
+ * Set to FPGA configuration type (full or partial).
+ */
+-#define COMMAND_RECONFIG_FLAG_PARTIAL 1
++#define COMMAND_RECONFIG_FLAG_PARTIAL 0
+
+ /**
+ * Timeout settings for service clients:
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index fb79ac497794b..688c7477ec0ab 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1607,6 +1607,12 @@ enum netdev_priv_flags {
+ #define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
+ #define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK
+
++/* Specifies the type of the struct net_device::ml_priv pointer */
++enum netdev_ml_priv_type {
++ ML_PRIV_NONE,
++ ML_PRIV_CAN,
++};
++
+ /**
+ * struct net_device - The DEVICE structure.
+ *
+@@ -1802,6 +1808,7 @@ enum netdev_priv_flags {
+ * @nd_net: Network namespace this network device is inside
+ *
+ * @ml_priv: Mid-layer private
++ * @ml_priv_type: Mid-layer private type
+ * @lstats: Loopback statistics
+ * @tstats: Tunnel statistics
+ * @dstats: Dummy statistics
+@@ -2114,8 +2121,10 @@ struct net_device {
+ possible_net_t nd_net;
+
+ /* mid-layer private */
++ void *ml_priv;
++ enum netdev_ml_priv_type ml_priv_type;
++
+ union {
+- void *ml_priv;
+ struct pcpu_lstats __percpu *lstats;
+ struct pcpu_sw_netstats __percpu *tstats;
+ struct pcpu_dstats __percpu *dstats;
+@@ -2305,6 +2314,29 @@ static inline void netdev_reset_rx_headroom(struct net_device *dev)
+ netdev_set_rx_headroom(dev, -1);
+ }
+
++static inline void *netdev_get_ml_priv(struct net_device *dev,
++ enum netdev_ml_priv_type type)
++{
++ if (dev->ml_priv_type != type)
++ return NULL;
++
++ return dev->ml_priv;
++}
++
++static inline void netdev_set_ml_priv(struct net_device *dev,
++ void *ml_priv,
++ enum netdev_ml_priv_type type)
++{
++ WARN(dev->ml_priv_type && dev->ml_priv_type != type,
++ "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n",
++ dev->ml_priv_type, type);
++ WARN(!dev->ml_priv_type && dev->ml_priv,
++ "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n");
++
++ dev->ml_priv = ml_priv;
++ dev->ml_priv_type = type;
++}
++
+ /*
+ * Net namespace inlines
+ */
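
ml_priv now carries a type tag, so a consumer such as the CAN stack can verify that the pointer really belongs to it instead of inferring ownership from dev->type. The tagged-pointer accessor pattern in isolation, as a hedged sketch with illustrative names:

    /* The getter returns NULL unless the stored tag matches, so a
     * mismatched consumer can never dereference another layer's data.
     */
    #include <stdio.h>

    enum priv_type { PRIV_NONE, PRIV_CAN };

    struct device {
        void *priv;
        enum priv_type priv_type;
    };

    static void *get_priv(struct device *d, enum priv_type t)
    {
        return d->priv_type == t ? d->priv : NULL;
    }

    static void set_priv(struct device *d, void *p, enum priv_type t)
    {
        d->priv = p;
        d->priv_type = t;
    }

    int main(void)
    {
        struct device dev = { 0 };
        int can_data = 42;

        set_priv(&dev, &can_data, PRIV_CAN);
        printf("as CAN:  %p\n", get_priv(&dev, PRIV_CAN));   /* valid */
        printf("as NONE: %p\n", get_priv(&dev, PRIV_NONE));  /* NULL */
        return 0;
    }
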
+diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
+index ec2ad4b0fe14f..c4fdb4463f7d5 100644
+--- a/include/linux/qcom-geni-se.h
++++ b/include/linux/qcom-geni-se.h
+@@ -460,7 +460,5 @@ void geni_icc_set_tag(struct geni_se *se, u32 tag);
+ int geni_icc_enable(struct geni_se *se);
+
+ int geni_icc_disable(struct geni_se *se);
+-
+-void geni_remove_earlycon_icc_vote(void);
+ #endif
+ #endif
+diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
+index 850424e5d0306..6ecf2a0220dbe 100644
+--- a/include/linux/ww_mutex.h
++++ b/include/linux/ww_mutex.h
+@@ -173,9 +173,10 @@ static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
+ */
+ static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
+ {
+-#ifdef CONFIG_DEBUG_MUTEXES
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ mutex_release(&ctx->dep_map, _THIS_IP_);
+-
++#endif
++#ifdef CONFIG_DEBUG_MUTEXES
+ DEBUG_LOCKS_WARN_ON(ctx->acquired);
+ if (!IS_ENABLED(CONFIG_PROVE_LOCKING))
+ /*
+diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
+index 5352ce50a97e3..2c25b830203cd 100644
+--- a/kernel/locking/mutex.c
++++ b/kernel/locking/mutex.c
+@@ -636,7 +636,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
+ */
+ static __always_inline bool
+ mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
+- const bool use_ww_ctx, struct mutex_waiter *waiter)
++ struct mutex_waiter *waiter)
+ {
+ if (!waiter) {
+ /*
+@@ -712,7 +712,7 @@ fail:
+ #else
+ static __always_inline bool
+ mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
+- const bool use_ww_ctx, struct mutex_waiter *waiter)
++ struct mutex_waiter *waiter)
+ {
+ return false;
+ }
+@@ -932,6 +932,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+ struct ww_mutex *ww;
+ int ret;
+
++ if (!use_ww_ctx)
++ ww_ctx = NULL;
++
+ might_sleep();
+
+ #ifdef CONFIG_DEBUG_MUTEXES
+@@ -939,7 +942,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+ #endif
+
+ ww = container_of(lock, struct ww_mutex, base);
+- if (use_ww_ctx && ww_ctx) {
++ if (ww_ctx) {
+ if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
+ return -EALREADY;
+
+@@ -956,10 +959,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+ mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
+
+ if (__mutex_trylock(lock) ||
+- mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
++ mutex_optimistic_spin(lock, ww_ctx, NULL)) {
+ /* got the lock, yay! */
+ lock_acquired(&lock->dep_map, ip);
+- if (use_ww_ctx && ww_ctx)
++ if (ww_ctx)
+ ww_mutex_set_context_fastpath(ww, ww_ctx);
+ preempt_enable();
+ return 0;
+@@ -970,7 +973,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+ * After waiting to acquire the wait_lock, try again.
+ */
+ if (__mutex_trylock(lock)) {
+- if (use_ww_ctx && ww_ctx)
++ if (ww_ctx)
+ __ww_mutex_check_waiters(lock, ww_ctx);
+
+ goto skip_wait;
+@@ -1023,7 +1026,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+ goto err;
+ }
+
+- if (use_ww_ctx && ww_ctx) {
++ if (ww_ctx) {
+ ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
+ if (ret)
+ goto err;
+@@ -1036,7 +1039,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+ * ww_mutex needs to always recheck its position since its waiter
+ * list is not FIFO ordered.
+ */
+- if ((use_ww_ctx && ww_ctx) || !first) {
++ if (ww_ctx || !first) {
+ first = __mutex_waiter_is_first(lock, &waiter);
+ if (first)
+ __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
+@@ -1049,7 +1052,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+ * or we must see its unlock and acquire.
+ */
+ if (__mutex_trylock(lock) ||
+- (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
++ (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
+ break;
+
+ spin_lock(&lock->wait_lock);
+@@ -1058,7 +1061,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+ acquired:
+ __set_current_state(TASK_RUNNING);
+
+- if (use_ww_ctx && ww_ctx) {
++ if (ww_ctx) {
+ /*
+ * Wound-Wait; we stole the lock (!first_waiter), check the
+ * waiters as anyone might want to wound us.
+@@ -1078,7 +1081,7 @@ skip_wait:
+ /* got the lock - cleanup and rejoice! */
+ lock_acquired(&lock->dep_map, ip);
+
+- if (use_ww_ctx && ww_ctx)
++ if (ww_ctx)
+ ww_mutex_lock_acquired(ww, ww_ctx);
+
+ spin_unlock(&lock->wait_lock);
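
Instead of testing "use_ww_ctx && ww_ctx" at every site, the mutex slow path now normalizes the pair once on entry, after which the pointer alone carries the information. The refactor in miniature, as a hedged sketch with stand-in names:

    struct ctx { int acquired; };

    static void lock_common(int use_ctx, struct ctx *c)
    {
        if (!use_ctx)
            c = NULL;         /* normalize the (flag, pointer) pair once */

        /* every later site tests the pointer alone, replacing the old
         * two-part "use_ctx && c" condition
         */
        if (c)
            c->acquired++;
    }
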
+diff --git a/kernel/reboot.c b/kernel/reboot.c
+index eb1b158507616..a6ad5eb2fa733 100644
+--- a/kernel/reboot.c
++++ b/kernel/reboot.c
+@@ -244,8 +244,6 @@ void migrate_to_reboot_cpu(void)
+ void kernel_restart(char *cmd)
+ {
+ kernel_restart_prepare(cmd);
+- if (pm_power_off_prepare)
+- pm_power_off_prepare();
+ migrate_to_reboot_cpu();
+ syscore_shutdown();
+ if (!cmd)
+diff --git a/kernel/static_call.c b/kernel/static_call.c
+index 49efbdc5b4800..f59089a122319 100644
+--- a/kernel/static_call.c
++++ b/kernel/static_call.c
+@@ -149,6 +149,7 @@ void __static_call_update(struct static_call_key *key, void *tramp, void *func)
+ };
+
+ for (site_mod = &first; site_mod; site_mod = site_mod->next) {
++ bool init = system_state < SYSTEM_RUNNING;
+ struct module *mod = site_mod->mod;
+
+ if (!site_mod->sites) {
+@@ -168,6 +169,7 @@ void __static_call_update(struct static_call_key *key, void *tramp, void *func)
+ if (mod) {
+ stop = mod->static_call_sites +
+ mod->num_static_call_sites;
++ init = mod->state == MODULE_STATE_COMING;
+ }
+ #endif
+
+@@ -175,16 +177,8 @@ void __static_call_update(struct static_call_key *key, void *tramp, void *func)
+ site < stop && static_call_key(site) == key; site++) {
+ void *site_addr = static_call_addr(site);
+
+- if (static_call_is_init(site)) {
+- /*
+- * Don't write to call sites which were in
+- * initmem and have since been freed.
+- */
+- if (!mod && system_state >= SYSTEM_RUNNING)
+- continue;
+- if (mod && !within_module_init((unsigned long)site_addr, mod))
+- continue;
+- }
++ if (!init && static_call_is_init(site))
++ continue;
+
+ if (!kernel_text_address((unsigned long)site_addr)) {
+ /*
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index b5815a022ecc2..c27b05aeb7d2d 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2984,7 +2984,8 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
+
+ size = nr_entries * sizeof(unsigned long);
+ event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
+- sizeof(*entry) + size, flags, pc);
++ (sizeof(*entry) - sizeof(entry->caller)) + size,
++ flags, pc);
+ if (!event)
+ goto out;
+ entry = ring_buffer_event_data(event);
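
sizeof(*entry) already includes the fixed eight-slot caller[] array, so reserving sizeof(*entry) + size counted that storage twice; the fix subtracts the nominal array before adding the real payload. A compilable sketch with a stand-in struct (not the kernel's stack_entry layout):

    /* When a struct ends in a fixed-size array used as variable-length
     * storage, reserve sizeof(struct) minus that array plus the payload.
     */
    #include <stdio.h>
    #include <stddef.h>

    struct stack_entry {
        int size;
        unsigned long caller[8];      /* nominal storage, actually variable */
    };

    int main(void)
    {
        unsigned int nr_entries = 3;
        size_t payload = nr_entries * sizeof(unsigned long);

        size_t naive = sizeof(struct stack_entry) + payload;
        size_t fixed = sizeof(struct stack_entry)
                     - sizeof(((struct stack_entry *)0)->caller) + payload;

        printf("naive=%zu fixed=%zu\n", naive, fixed);  /* fixed is smaller */
        return 0;
    }
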
+diff --git a/mm/memory.c b/mm/memory.c
+index 97e1d045f236f..bf0cbc8d56176 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -154,7 +154,7 @@ static int __init init_zero_pfn(void)
+ zero_pfn = page_to_pfn(ZERO_PAGE(0));
+ return 0;
+ }
+-core_initcall(init_zero_pfn);
++early_initcall(init_zero_pfn);
+
+ void mm_trace_rss_stat(struct mm_struct *mm, int member, long count)
+ {
+diff --git a/net/9p/client.c b/net/9p/client.c
+index 4f62f299da0cf..0a9019da18f39 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -1623,10 +1623,6 @@ p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to,
+ }
+
+ p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", count);
+- if (!count) {
+- p9_tag_remove(clnt, req);
+- return 0;
+- }
+
+ if (non_zc) {
+ int n = copy_to_iter(dataptr, count, to);
+diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
+index ca1a0d07a0878..ebda397fa95a7 100644
+--- a/net/appletalk/ddp.c
++++ b/net/appletalk/ddp.c
+@@ -1577,8 +1577,8 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ struct sk_buff *skb;
+ struct net_device *dev;
+ struct ddpehdr *ddp;
+- int size;
+- struct atalk_route *rt;
++ int size, hard_header_len;
++ struct atalk_route *rt, *rt_lo = NULL;
+ int err;
+
+ if (flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
+@@ -1641,7 +1641,22 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ SOCK_DEBUG(sk, "SK %p: Size needed %d, device %s\n",
+ sk, size, dev->name);
+
+- size += dev->hard_header_len;
++ hard_header_len = dev->hard_header_len;
++ /* Leave room for loopback hardware header if necessary */
++ if (usat->sat_addr.s_node == ATADDR_BCAST &&
++ (dev->flags & IFF_LOOPBACK || !(rt->flags & RTF_GATEWAY))) {
++ struct atalk_addr at_lo;
++
++ at_lo.s_node = 0;
++ at_lo.s_net = 0;
++
++ rt_lo = atrtr_find(&at_lo);
++
++ if (rt_lo && rt_lo->dev->hard_header_len > hard_header_len)
++ hard_header_len = rt_lo->dev->hard_header_len;
++ }
++
++ size += hard_header_len;
+ release_sock(sk);
+ skb = sock_alloc_send_skb(sk, size, (flags & MSG_DONTWAIT), &err);
+ lock_sock(sk);
+@@ -1649,7 +1664,7 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ goto out;
+
+ skb_reserve(skb, ddp_dl->header_length);
+- skb_reserve(skb, dev->hard_header_len);
++ skb_reserve(skb, hard_header_len);
+ skb->dev = dev;
+
+ SOCK_DEBUG(sk, "SK %p: Begin build.\n", sk);
+@@ -1700,18 +1715,12 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ /* loop back */
+ skb_orphan(skb);
+ if (ddp->deh_dnode == ATADDR_BCAST) {
+- struct atalk_addr at_lo;
+-
+- at_lo.s_node = 0;
+- at_lo.s_net = 0;
+-
+- rt = atrtr_find(&at_lo);
+- if (!rt) {
++ if (!rt_lo) {
+ kfree_skb(skb);
+ err = -ENETUNREACH;
+ goto out;
+ }
+- dev = rt->dev;
++ dev = rt_lo->dev;
+ skb->dev = dev;
+ }
+ ddp_dl->request(ddp_dl, skb, dev->dev_addr);
+diff --git a/net/can/af_can.c b/net/can/af_can.c
+index 837bb8af0ec30..cce2af10eb3ea 100644
+--- a/net/can/af_can.c
++++ b/net/can/af_can.c
+@@ -304,8 +304,8 @@ static struct can_dev_rcv_lists *can_dev_rcv_lists_find(struct net *net,
+ struct net_device *dev)
+ {
+ if (dev) {
+- struct can_ml_priv *ml_priv = dev->ml_priv;
+- return &ml_priv->dev_rcv_lists;
++ struct can_ml_priv *can_ml = can_get_ml_priv(dev);
++ return &can_ml->dev_rcv_lists;
+ } else {
+ return net->can.rx_alldev_list;
+ }
+@@ -790,25 +790,6 @@ void can_proto_unregister(const struct can_proto *cp)
+ }
+ EXPORT_SYMBOL(can_proto_unregister);
+
+-/* af_can notifier to create/remove CAN netdevice specific structs */
+-static int can_notifier(struct notifier_block *nb, unsigned long msg,
+- void *ptr)
+-{
+- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+-
+- if (dev->type != ARPHRD_CAN)
+- return NOTIFY_DONE;
+-
+- switch (msg) {
+- case NETDEV_REGISTER:
+- WARN(!dev->ml_priv,
+- "No CAN mid layer private allocated, please fix your driver and use alloc_candev()!\n");
+- break;
+- }
+-
+- return NOTIFY_DONE;
+-}
+-
+ static int can_pernet_init(struct net *net)
+ {
+ spin_lock_init(&net->can.rcvlists_lock);
+@@ -876,11 +857,6 @@ static const struct net_proto_family can_family_ops = {
+ .owner = THIS_MODULE,
+ };
+
+-/* notifier block for netdevice event */
+-static struct notifier_block can_netdev_notifier __read_mostly = {
+- .notifier_call = can_notifier,
+-};
+-
+ static struct pernet_operations can_pernet_ops __read_mostly = {
+ .init = can_pernet_init,
+ .exit = can_pernet_exit,
+@@ -911,17 +887,12 @@ static __init int can_init(void)
+ err = sock_register(&can_family_ops);
+ if (err)
+ goto out_sock;
+- err = register_netdevice_notifier(&can_netdev_notifier);
+- if (err)
+- goto out_notifier;
+
+ dev_add_pack(&can_packet);
+ dev_add_pack(&canfd_packet);
+
+ return 0;
+
+-out_notifier:
+- sock_unregister(PF_CAN);
+ out_sock:
+ unregister_pernet_subsys(&can_pernet_ops);
+ out_pernet:
+@@ -935,7 +906,6 @@ static __exit void can_exit(void)
+ /* protocol unregister */
+ dev_remove_pack(&canfd_packet);
+ dev_remove_pack(&can_packet);
+- unregister_netdevice_notifier(&can_netdev_notifier);
+ sock_unregister(PF_CAN);
+
+ unregister_pernet_subsys(&can_pernet_ops);
+diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c
+index bb914d8b42168..da3a7a7bcff2b 100644
+--- a/net/can/j1939/main.c
++++ b/net/can/j1939/main.c
+@@ -140,9 +140,9 @@ static struct j1939_priv *j1939_priv_create(struct net_device *ndev)
+ static inline void j1939_priv_set(struct net_device *ndev,
+ struct j1939_priv *priv)
+ {
+- struct can_ml_priv *can_ml_priv = ndev->ml_priv;
++ struct can_ml_priv *can_ml = can_get_ml_priv(ndev);
+
+- can_ml_priv->j1939_priv = priv;
++ can_ml->j1939_priv = priv;
+ }
+
+ static void __j1939_priv_release(struct kref *kref)
+@@ -211,12 +211,9 @@ static void __j1939_rx_release(struct kref *kref)
+ /* get pointer to priv without increasing ref counter */
+ static inline struct j1939_priv *j1939_ndev_to_priv(struct net_device *ndev)
+ {
+- struct can_ml_priv *can_ml_priv = ndev->ml_priv;
++ struct can_ml_priv *can_ml = can_get_ml_priv(ndev);
+
+- if (!can_ml_priv)
+- return NULL;
+-
+- return can_ml_priv->j1939_priv;
++ return can_ml->j1939_priv;
+ }
+
+ static struct j1939_priv *j1939_priv_get_by_ndev_locked(struct net_device *ndev)
+@@ -225,9 +222,6 @@ static struct j1939_priv *j1939_priv_get_by_ndev_locked(struct net_device *ndev)
+
+ lockdep_assert_held(&j1939_netdev_lock);
+
+- if (ndev->type != ARPHRD_CAN)
+- return NULL;
+-
+ priv = j1939_ndev_to_priv(ndev);
+ if (priv)
+ j1939_priv_get(priv);
+@@ -348,15 +342,16 @@ static int j1939_netdev_notify(struct notifier_block *nb,
+ unsigned long msg, void *data)
+ {
+ struct net_device *ndev = netdev_notifier_info_to_dev(data);
++ struct can_ml_priv *can_ml = can_get_ml_priv(ndev);
+ struct j1939_priv *priv;
+
++ if (!can_ml)
++ goto notify_done;
++
+ priv = j1939_priv_get_by_ndev(ndev);
+ if (!priv)
+ goto notify_done;
+
+- if (ndev->type != ARPHRD_CAN)
+- goto notify_put;
+-
+ switch (msg) {
+ case NETDEV_DOWN:
+ j1939_cancel_active_session(priv, NULL);
+@@ -365,7 +360,6 @@ static int j1939_netdev_notify(struct notifier_block *nb,
+ break;
+ }
+
+-notify_put:
+ j1939_priv_put(priv);
+
+ notify_done:
+diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
+index f23966526a885..56aa66147d5ac 100644
+--- a/net/can/j1939/socket.c
++++ b/net/can/j1939/socket.c
+@@ -12,6 +12,7 @@
+
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
++#include <linux/can/can-ml.h>
+ #include <linux/can/core.h>
+ #include <linux/can/skb.h>
+ #include <linux/errqueue.h>
+@@ -453,6 +454,7 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+ j1939_jsk_del(priv, jsk);
+ j1939_local_ecu_put(priv, jsk->addr.src_name, jsk->addr.sa);
+ } else {
++ struct can_ml_priv *can_ml;
+ struct net_device *ndev;
+
+ ndev = dev_get_by_index(net, addr->can_ifindex);
+@@ -461,15 +463,8 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+ goto out_release_sock;
+ }
+
+- if (ndev->type != ARPHRD_CAN) {
+- dev_put(ndev);
+- ret = -ENODEV;
+- goto out_release_sock;
+- }
+-
+- if (!ndev->ml_priv) {
+- netdev_warn_once(ndev,
+- "No CAN mid layer private allocated, please fix your driver and use alloc_candev()!\n");
++ can_ml = can_get_ml_priv(ndev);
++ if (!can_ml) {
+ dev_put(ndev);
+ ret = -ENODEV;
+ goto out_release_sock;
+diff --git a/net/can/proc.c b/net/can/proc.c
+index 5ea8695f507eb..b15760b5c1cce 100644
+--- a/net/can/proc.c
++++ b/net/can/proc.c
+@@ -322,8 +322,11 @@ static int can_rcvlist_proc_show(struct seq_file *m, void *v)
+
+ /* receive list for registered CAN devices */
+ for_each_netdev_rcu(net, dev) {
+- if (dev->type == ARPHRD_CAN && dev->ml_priv)
+- can_rcvlist_proc_show_one(m, idx, dev, dev->ml_priv);
++ struct can_ml_priv *can_ml = can_get_ml_priv(dev);
++
++ if (can_ml)
++ can_rcvlist_proc_show_one(m, idx, dev,
++ &can_ml->dev_rcv_lists);
+ }
+
+ rcu_read_unlock();
+@@ -375,8 +378,10 @@ static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v)
+
+ /* sff receive list for registered CAN devices */
+ for_each_netdev_rcu(net, dev) {
+- if (dev->type == ARPHRD_CAN && dev->ml_priv) {
+- dev_rcv_lists = dev->ml_priv;
++ struct can_ml_priv *can_ml = can_get_ml_priv(dev);
++
++ if (can_ml) {
++ dev_rcv_lists = &can_ml->dev_rcv_lists;
+ can_rcvlist_proc_show_array(m, dev, dev_rcv_lists->rx_sff,
+ ARRAY_SIZE(dev_rcv_lists->rx_sff));
+ }
+@@ -406,8 +411,10 @@ static int can_rcvlist_eff_proc_show(struct seq_file *m, void *v)
+
+ /* eff receive list for registered CAN devices */
+ for_each_netdev_rcu(net, dev) {
+- if (dev->type == ARPHRD_CAN && dev->ml_priv) {
+- dev_rcv_lists = dev->ml_priv;
++ struct can_ml_priv *can_ml = can_get_ml_priv(dev);
++
++ if (can_ml) {
++ dev_rcv_lists = &can_ml->dev_rcv_lists;
+ can_rcvlist_proc_show_array(m, dev, dev_rcv_lists->rx_eff,
+ ARRAY_SIZE(dev_rcv_lists->rx_eff));
+ }
+diff --git a/net/core/filter.c b/net/core/filter.c
+index ee665720a41a0..8e4cc25cc98e1 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -3552,11 +3552,7 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff,
+ return 0;
+ }
+
+-static u32 __bpf_skb_max_len(const struct sk_buff *skb)
+-{
+- return skb->dev ? skb->dev->mtu + skb->dev->hard_header_len :
+- SKB_MAX_ALLOC;
+-}
++#define BPF_SKB_MAX_LEN SKB_MAX_ALLOC
+
+ BPF_CALL_4(sk_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
+ u32, mode, u64, flags)
+@@ -3605,7 +3601,7 @@ BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
+ {
+ u32 len_cur, len_diff_abs = abs(len_diff);
+ u32 len_min = bpf_skb_net_base_len(skb);
+- u32 len_max = __bpf_skb_max_len(skb);
++ u32 len_max = BPF_SKB_MAX_LEN;
+ __be16 proto = skb->protocol;
+ bool shrink = len_diff < 0;
+ u32 off;
+@@ -3688,7 +3684,7 @@ static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
+ static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len,
+ u64 flags)
+ {
+- u32 max_len = __bpf_skb_max_len(skb);
++ u32 max_len = BPF_SKB_MAX_LEN;
+ u32 min_len = __bpf_skb_min_len(skb);
+ int ret;
+
+@@ -3764,7 +3760,7 @@ static const struct bpf_func_proto sk_skb_change_tail_proto = {
+ static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
+ u64 flags)
+ {
+- u32 max_len = __bpf_skb_max_len(skb);
++ u32 max_len = BPF_SKB_MAX_LEN;
+ u32 new_len = skb->len + head_room;
+ int ret;
+
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index 7a06d43016175..180be5102efc5 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -1050,6 +1050,9 @@ proto_again:
+ key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ }
+
++ __skb_flow_dissect_ipv4(skb, flow_dissector,
++ target_container, data, iph);
++
+ if (ip_is_fragment(iph)) {
+ key_control->flags |= FLOW_DIS_IS_FRAGMENT;
+
+@@ -1066,9 +1069,6 @@ proto_again:
+ }
+ }
+
+- __skb_flow_dissect_ipv4(skb, flow_dissector,
+- target_container, data, iph);
+-
+ break;
+ }
+ case htons(ETH_P_IPV6): {
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index 37ef0bf098f6d..9e86c601093f4 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -885,8 +885,7 @@ static void ack_update_msk(struct mptcp_sock *msk,
+ msk->wnd_end = new_wnd_end;
+
+ /* this assumes mptcp_incoming_options() is invoked after tcp_ack() */
+- if (after64(msk->wnd_end, READ_ONCE(msk->snd_nxt)) &&
+- sk_stream_memory_free(ssk))
++ if (after64(msk->wnd_end, READ_ONCE(msk->snd_nxt)))
+ __mptcp_check_push(sk, ssk);
+
+ if (after64(new_snd_una, old_snd_una)) {
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 7345df40385ab..5932b0ebecc31 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -11,6 +11,7 @@
+ #include <linux/netdevice.h>
+ #include <linux/sched/signal.h>
+ #include <linux/atomic.h>
++#include <linux/igmp.h>
+ #include <net/sock.h>
+ #include <net/inet_common.h>
+ #include <net/inet_hashtables.h>
+@@ -19,6 +20,7 @@
+ #include <net/tcp_states.h>
+ #if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ #include <net/transp_v6.h>
++#include <net/addrconf.h>
+ #endif
+ #include <net/mptcp.h>
+ #include <net/xfrm.h>
+@@ -1440,7 +1442,7 @@ static void mptcp_push_release(struct sock *sk, struct sock *ssk,
+ release_sock(ssk);
+ }
+
+-static void mptcp_push_pending(struct sock *sk, unsigned int flags)
++static void __mptcp_push_pending(struct sock *sk, unsigned int flags)
+ {
+ struct sock *prev_ssk = NULL, *ssk = NULL;
+ struct mptcp_sock *msk = mptcp_sk(sk);
+@@ -1568,6 +1570,9 @@ out:
+ mptcp_set_timeout(sk, ssk);
+ tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
+ info.size_goal);
++ if (!mptcp_timer_pending(sk))
++ mptcp_reset_timer(sk);
++
+ if (msk->snd_data_fin_enable &&
+ msk->snd_nxt + 1 == msk->write_seq)
+ mptcp_schedule_work(sk);
+@@ -1676,14 +1681,14 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+
+ wait_for_memory:
+ set_bit(MPTCP_NOSPACE, &msk->flags);
+- mptcp_push_pending(sk, msg->msg_flags);
++ __mptcp_push_pending(sk, msg->msg_flags);
+ ret = sk_stream_wait_memory(sk, &timeo);
+ if (ret)
+ goto out;
+ }
+
+ if (copied)
+- mptcp_push_pending(sk, msg->msg_flags);
++ __mptcp_push_pending(sk, msg->msg_flags);
+
+ out:
+ release_sock(sk);
+@@ -2289,13 +2294,12 @@ static void mptcp_worker(struct work_struct *work)
+ __mptcp_check_send_data_fin(sk);
+ mptcp_check_data_fin(sk);
+
+- /* if the msk data is completely acked, or the socket timedout,
+- * there is no point in keeping around an orphaned sk
++	/* There is no point in keeping around an orphaned sk that has timed
++	 * out or been closed, but we need the msk around to reply to incoming
++	 * DATA_FIN, even if it is orphaned and in FIN_WAIT2 state
+ */
+ if (sock_flag(sk, SOCK_DEAD) &&
+- (mptcp_check_close_timeout(sk) ||
+- (state != sk->sk_state &&
+- ((1 << inet_sk_state_load(sk)) & (TCPF_CLOSE | TCPF_FIN_WAIT2))))) {
++ (mptcp_check_close_timeout(sk) || sk->sk_state == TCP_CLOSE)) {
+ inet_sk_state_store(sk, TCP_CLOSE);
+ __mptcp_destroy_sock(sk);
+ goto unlock;
+@@ -2940,13 +2944,14 @@ static void mptcp_release_cb(struct sock *sk)
+ {
+ unsigned long flags, nflags;
+
+- /* push_pending may touch wmem_reserved, do it before the later
+- * cleanup
+- */
+- if (test_and_clear_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->flags))
+- __mptcp_clean_una(sk);
+- if (test_and_clear_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags)) {
+- /* mptcp_push_pending() acquires the subflow socket lock
++ for (;;) {
++ flags = 0;
++ if (test_and_clear_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags))
++ flags |= BIT(MPTCP_PUSH_PENDING);
++ if (!flags)
++ break;
++
++ /* the following actions acquire the subflow socket lock
+ *
+ * 1) can't be invoked in atomic scope
+ * 2) must avoid ABBA deadlock with msk socket spinlock: the RX
+@@ -2955,11 +2960,21 @@ static void mptcp_release_cb(struct sock *sk)
+ */
+
+ spin_unlock_bh(&sk->sk_lock.slock);
+- mptcp_push_pending(sk, 0);
++ if (flags & BIT(MPTCP_PUSH_PENDING))
++ __mptcp_push_pending(sk, 0);
++
++ cond_resched();
+ spin_lock_bh(&sk->sk_lock.slock);
+ }
+
+- /* clear any wmem reservation and errors */
++ if (test_and_clear_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->flags))
++ __mptcp_clean_una(sk);
++ if (test_and_clear_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->flags))
++ __mptcp_error_report(sk);
++
++ /* push_pending may touch wmem_reserved, ensure we do the cleanup
++ * later
++ */
+ __mptcp_update_wmem(sk);
+ __mptcp_update_rmem(sk);
+
+@@ -3318,7 +3333,7 @@ static __poll_t mptcp_check_writeable(struct mptcp_sock *msk)
+ struct sock *sk = (struct sock *)msk;
+
+ if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN))
+- return 0;
++ return EPOLLOUT | EPOLLWRNORM;
+
+ if (sk_stream_is_writeable(sk))
+ return EPOLLOUT | EPOLLWRNORM;
+@@ -3351,16 +3366,47 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
+ mask |= mptcp_check_readable(msk);
+ mask |= mptcp_check_writeable(msk);
+ }
++ if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
++ mask |= EPOLLHUP;
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
+
++ /* This barrier is coupled with smp_wmb() in tcp_reset() */
++ smp_rmb();
++ if (sk->sk_err)
++ mask |= EPOLLERR;
++
+ return mask;
+ }
+
++static int mptcp_release(struct socket *sock)
++{
++ struct mptcp_subflow_context *subflow;
++ struct sock *sk = sock->sk;
++ struct mptcp_sock *msk;
++
++ if (!sk)
++ return 0;
++
++ lock_sock(sk);
++
++ msk = mptcp_sk(sk);
++
++ mptcp_for_each_subflow(msk, subflow) {
++ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
++
++ ip_mc_drop_socket(ssk);
++ }
++
++ release_sock(sk);
++
++ return inet_release(sock);
++}
++
+ static const struct proto_ops mptcp_stream_ops = {
+ .family = PF_INET,
+ .owner = THIS_MODULE,
+- .release = inet_release,
++ .release = mptcp_release,
+ .bind = mptcp_bind,
+ .connect = mptcp_stream_connect,
+ .socketpair = sock_no_socketpair,
+@@ -3407,10 +3453,35 @@ void __init mptcp_proto_init(void)
+ }
+
+ #if IS_ENABLED(CONFIG_MPTCP_IPV6)
++static int mptcp6_release(struct socket *sock)
++{
++ struct mptcp_subflow_context *subflow;
++ struct mptcp_sock *msk;
++ struct sock *sk = sock->sk;
++
++ if (!sk)
++ return 0;
++
++ lock_sock(sk);
++
++ msk = mptcp_sk(sk);
++
++ mptcp_for_each_subflow(msk, subflow) {
++ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
++
++ ip_mc_drop_socket(ssk);
++ ipv6_sock_mc_close(ssk);
++ ipv6_sock_ac_close(ssk);
++ }
++
++ release_sock(sk);
++ return inet6_release(sock);
++}
++
+ static const struct proto_ops mptcp_v6_stream_ops = {
+ .family = PF_INET6,
+ .owner = THIS_MODULE,
+- .release = inet6_release,
++ .release = mptcp6_release,
+ .bind = mptcp_bind,
+ .connect = mptcp_stream_connect,
+ .socketpair = sock_no_socketpair,
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index c374345ad1349..62288836d0534 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -96,6 +96,7 @@
+ #define MPTCP_WORK_CLOSE_SUBFLOW 5
+ #define MPTCP_PUSH_PENDING 6
+ #define MPTCP_CLEAN_UNA 7
++#define MPTCP_ERROR_REPORT 8
+
+ static inline bool before64(__u64 seq1, __u64 seq2)
+ {
+@@ -413,6 +414,7 @@ struct mptcp_subflow_context {
+ void (*tcp_data_ready)(struct sock *sk);
+ void (*tcp_state_change)(struct sock *sk);
+ void (*tcp_write_space)(struct sock *sk);
++ void (*tcp_error_report)(struct sock *sk);
+
+ struct rcu_head rcu;
+ };
+@@ -478,6 +480,7 @@ static inline void mptcp_subflow_tcp_fallback(struct sock *sk,
+ sk->sk_data_ready = ctx->tcp_data_ready;
+ sk->sk_state_change = ctx->tcp_state_change;
+ sk->sk_write_space = ctx->tcp_write_space;
++ sk->sk_error_report = ctx->tcp_error_report;
+
+ inet_csk(sk)->icsk_af_ops = ctx->icsk_af_ops;
+ }
+@@ -505,6 +508,7 @@ bool mptcp_finish_join(struct sock *sk);
+ bool mptcp_schedule_work(struct sock *sk);
+ void __mptcp_check_push(struct sock *sk, struct sock *ssk);
+ void __mptcp_data_acked(struct sock *sk);
++void __mptcp_error_report(struct sock *sk);
+ void mptcp_subflow_eof(struct sock *sk);
+ bool mptcp_update_rcv_data_fin(struct mptcp_sock *msk, u64 data_fin_seq, bool use_64bit);
+ void __mptcp_flush_join_list(struct mptcp_sock *msk);
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 96e040951cd40..f97f29df4505e 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -92,7 +92,7 @@ static struct mptcp_sock *subflow_token_join_request(struct request_sock *req,
+ return msk;
+ }
+
+-static int __subflow_init_req(struct request_sock *req, const struct sock *sk_listener)
++static void subflow_init_req(struct request_sock *req, const struct sock *sk_listener)
+ {
+ struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
+
+@@ -100,16 +100,6 @@ static int __subflow_init_req(struct request_sock *req, const struct sock *sk_li
+ subflow_req->mp_join = 0;
+ subflow_req->msk = NULL;
+ mptcp_token_init_request(req);
+-
+-#ifdef CONFIG_TCP_MD5SIG
+- /* no MPTCP if MD5SIG is enabled on this socket or we may run out of
+- * TCP option space.
+- */
+- if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
+- return -EINVAL;
+-#endif
+-
+- return 0;
+ }
+
+ /* Init mptcp request socket.
+@@ -117,20 +107,23 @@ static int __subflow_init_req(struct request_sock *req, const struct sock *sk_li
+ * Returns an error code if a JOIN has failed and a TCP reset
+ * should be sent.
+ */
+-static int subflow_init_req(struct request_sock *req,
+- const struct sock *sk_listener,
+- struct sk_buff *skb)
++static int subflow_check_req(struct request_sock *req,
++ const struct sock *sk_listener,
++ struct sk_buff *skb)
+ {
+ struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
+ struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
+ struct mptcp_options_received mp_opt;
+- int ret;
+
+ pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);
+
+- ret = __subflow_init_req(req, sk_listener);
+- if (ret)
+- return 0;
++#ifdef CONFIG_TCP_MD5SIG
++ /* no MPTCP if MD5SIG is enabled on this socket or we may run out of
++ * TCP option space.
++ */
++ if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
++ return -EINVAL;
++#endif
+
+ mptcp_get_options(skb, &mp_opt);
+
+@@ -205,10 +198,7 @@ int mptcp_subflow_init_cookie_req(struct request_sock *req,
+ struct mptcp_options_received mp_opt;
+ int err;
+
+- err = __subflow_init_req(req, sk_listener);
+- if (err)
+- return err;
+-
++ subflow_init_req(req, sk_listener);
+ mptcp_get_options(skb, &mp_opt);
+
+ if (mp_opt.mp_capable && mp_opt.mp_join)
+@@ -248,12 +238,13 @@ static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
+ int err;
+
+ tcp_rsk(req)->is_mptcp = 1;
++ subflow_init_req(req, sk);
+
+ dst = tcp_request_sock_ipv4_ops.route_req(sk, skb, fl, req);
+ if (!dst)
+ return NULL;
+
+- err = subflow_init_req(req, sk, skb);
++ err = subflow_check_req(req, sk, skb);
+ if (err == 0)
+ return dst;
+
+@@ -273,12 +264,13 @@ static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
+ int err;
+
+ tcp_rsk(req)->is_mptcp = 1;
++ subflow_init_req(req, sk);
+
+ dst = tcp_request_sock_ipv6_ops.route_req(sk, skb, fl, req);
+ if (!dst)
+ return NULL;
+
+- err = subflow_init_req(req, sk, skb);
++ err = subflow_check_req(req, sk, skb);
+ if (err == 0)
+ return dst;
+
+@@ -1054,6 +1046,46 @@ static void subflow_write_space(struct sock *ssk)
+ /* we take action in __mptcp_clean_una() */
+ }
+
++void __mptcp_error_report(struct sock *sk)
++{
++ struct mptcp_subflow_context *subflow;
++ struct mptcp_sock *msk = mptcp_sk(sk);
++
++ mptcp_for_each_subflow(msk, subflow) {
++ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
++ int err = sock_error(ssk);
++
++ if (!err)
++ continue;
++
++ /* only propagate errors on fallen-back sockets or
++ * on MPC connect
++ */
++ if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(msk))
++ continue;
++
++ inet_sk_state_store(sk, inet_sk_state_load(ssk));
++ sk->sk_err = -err;
++
++ /* This barrier is coupled with smp_rmb() in mptcp_poll() */
++ smp_wmb();
++ sk->sk_error_report(sk);
++ break;
++ }
++}
++
++static void subflow_error_report(struct sock *ssk)
++{
++ struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
++
++ mptcp_data_lock(sk);
++ if (!sock_owned_by_user(sk))
++ __mptcp_error_report(sk);
++ else
++ set_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->flags);
++ mptcp_data_unlock(sk);
++}
++
+ static struct inet_connection_sock_af_ops *
+ subflow_default_af_ops(struct sock *sk)
+ {
+@@ -1367,9 +1399,11 @@ static int subflow_ulp_init(struct sock *sk)
+ ctx->tcp_data_ready = sk->sk_data_ready;
+ ctx->tcp_state_change = sk->sk_state_change;
+ ctx->tcp_write_space = sk->sk_write_space;
++ ctx->tcp_error_report = sk->sk_error_report;
+ sk->sk_data_ready = subflow_data_ready;
+ sk->sk_write_space = subflow_write_space;
+ sk->sk_state_change = subflow_state_change;
++ sk->sk_error_report = subflow_error_report;
+ out:
+ return err;
+ }
+@@ -1422,6 +1456,7 @@ static void subflow_ulp_clone(const struct request_sock *req,
+ new_ctx->tcp_data_ready = old_ctx->tcp_data_ready;
+ new_ctx->tcp_state_change = old_ctx->tcp_state_change;
+ new_ctx->tcp_write_space = old_ctx->tcp_write_space;
++ new_ctx->tcp_error_report = old_ctx->tcp_error_report;
+ new_ctx->rel_write_seq = 1;
+ new_ctx->tcp_sock = newsk;
+
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index bd4678db9d76b..6dff64374bfe1 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -1825,11 +1825,14 @@ static int
+ svcauth_gss_release(struct svc_rqst *rqstp)
+ {
+ struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
+- struct rpc_gss_wire_cred *gc = &gsd->clcred;
++ struct rpc_gss_wire_cred *gc;
+ struct xdr_buf *resbuf = &rqstp->rq_res;
+ int stat = -EINVAL;
+ struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
+
++ if (!gsd)
++ goto out;
++ gc = &gsd->clcred;
+ if (gc->gc_proc != RPC_GSS_PROC_DATA)
+ goto out;
+ /* Release can be called twice, but we only wrap once. */
+@@ -1870,10 +1873,10 @@ out_err:
+ if (rqstp->rq_cred.cr_group_info)
+ put_group_info(rqstp->rq_cred.cr_group_info);
+ rqstp->rq_cred.cr_group_info = NULL;
+- if (gsd->rsci)
++ if (gsd && gsd->rsci) {
+ cache_put(&gsd->rsci->h, sn->rsc_cache);
+- gsd->rsci = NULL;
+-
++ gsd->rsci = NULL;
++ }
+ return stat;
+ }
+
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 253d538251ae1..89a80beab5876 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -1023,8 +1023,12 @@ static int azx_prepare(struct device *dev)
+ struct snd_card *card = dev_get_drvdata(dev);
+ struct azx *chip;
+
++ if (!azx_is_pm_ready(card))
++ return 0;
++
+ chip = card->private_data;
+ chip->pm_prepared = 1;
++ snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
+
+ flush_work(&azx_bus(chip)->unsol_work);
+
+@@ -1039,7 +1043,11 @@ static void azx_complete(struct device *dev)
+ struct snd_card *card = dev_get_drvdata(dev);
+ struct azx *chip;
+
++ if (!azx_is_pm_ready(card))
++ return;
++
+ chip = card->private_data;
++ snd_power_change_state(card, SNDRV_CTL_POWER_D0);
+ chip->pm_prepared = 0;
+ }
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 316b9b4ccb32d..58946d069ee59 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5256,7 +5256,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
+ case 0x10ec0274:
+ case 0x10ec0294:
+ alc_process_coef_fw(codec, coef0274);
+- msleep(80);
++ msleep(850);
+ val = alc_read_coef_idx(codec, 0x46);
+ is_ctia = (val & 0x00f0) == 0x00f0;
+ break;
+@@ -5440,6 +5440,7 @@ static void alc_update_headset_jack_cb(struct hda_codec *codec,
+ struct hda_jack_callback *jack)
+ {
+ snd_hda_gen_hp_automute(codec, jack);
++ alc_update_headset_mode(codec);
+ }
+
+ static void alc_probe_headset_mode(struct hda_codec *codec)
+@@ -8057,6 +8058,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x87f2, "HP ProBook 640 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x87f4, "HP", ALC287_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
+diff --git a/sound/soc/codecs/cs42l42.c b/sound/soc/codecs/cs42l42.c
+index 210fcbedf2413..4d82d24c7828d 100644
+--- a/sound/soc/codecs/cs42l42.c
++++ b/sound/soc/codecs/cs42l42.c
+@@ -401,7 +401,7 @@ static const struct regmap_config cs42l42_regmap = {
+ };
+
+ static DECLARE_TLV_DB_SCALE(adc_tlv, -9600, 100, false);
+-static DECLARE_TLV_DB_SCALE(mixer_tlv, -6200, 100, false);
++static DECLARE_TLV_DB_SCALE(mixer_tlv, -6300, 100, true);
+
+ static const char * const cs42l42_hpf_freq_text[] = {
+ "1.86Hz", "120Hz", "235Hz", "466Hz"
+@@ -458,7 +458,7 @@ static const struct snd_kcontrol_new cs42l42_snd_controls[] = {
+ CS42L42_DAC_HPF_EN_SHIFT, true, false),
+ SOC_DOUBLE_R_TLV("Mixer Volume", CS42L42_MIXER_CHA_VOL,
+ CS42L42_MIXER_CHB_VOL, CS42L42_MIXER_CH_VOL_SHIFT,
+- 0x3e, 1, mixer_tlv)
++ 0x3f, 1, mixer_tlv)
+ };
+
+ static int cs42l42_hpdrv_evt(struct snd_soc_dapm_widget *w,
+@@ -691,24 +691,6 @@ static int cs42l42_pll_config(struct snd_soc_component *component)
+ CS42L42_CLK_OASRC_SEL_MASK,
+ CS42L42_CLK_OASRC_SEL_12 <<
+ CS42L42_CLK_OASRC_SEL_SHIFT);
+- /* channel 1 on low LRCLK, 32 bit */
+- snd_soc_component_update_bits(component,
+- CS42L42_ASP_RX_DAI0_CH1_AP_RES,
+- CS42L42_ASP_RX_CH_AP_MASK |
+- CS42L42_ASP_RX_CH_RES_MASK,
+- (CS42L42_ASP_RX_CH_AP_LOW <<
+- CS42L42_ASP_RX_CH_AP_SHIFT) |
+- (CS42L42_ASP_RX_CH_RES_32 <<
+- CS42L42_ASP_RX_CH_RES_SHIFT));
+- /* Channel 2 on high LRCLK, 32 bit */
+- snd_soc_component_update_bits(component,
+- CS42L42_ASP_RX_DAI0_CH2_AP_RES,
+- CS42L42_ASP_RX_CH_AP_MASK |
+- CS42L42_ASP_RX_CH_RES_MASK,
+- (CS42L42_ASP_RX_CH_AP_HI <<
+- CS42L42_ASP_RX_CH_AP_SHIFT) |
+- (CS42L42_ASP_RX_CH_RES_32 <<
+- CS42L42_ASP_RX_CH_RES_SHIFT));
+ if (pll_ratio_table[i].mclk_src_sel == 0) {
+ /* Pass the clock straight through */
+ snd_soc_component_update_bits(component,
+@@ -797,27 +779,23 @@ static int cs42l42_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
+ /* Bitclock/frame inversion */
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
++ asp_cfg_val |= CS42L42_ASP_SCPOL_NOR << CS42L42_ASP_SCPOL_SHIFT;
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+- asp_cfg_val |= CS42L42_ASP_POL_INV <<
+- CS42L42_ASP_LCPOL_IN_SHIFT;
++ asp_cfg_val |= CS42L42_ASP_SCPOL_NOR << CS42L42_ASP_SCPOL_SHIFT;
++ asp_cfg_val |= CS42L42_ASP_LCPOL_INV << CS42L42_ASP_LCPOL_SHIFT;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+- asp_cfg_val |= CS42L42_ASP_POL_INV <<
+- CS42L42_ASP_SCPOL_IN_DAC_SHIFT;
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+- asp_cfg_val |= CS42L42_ASP_POL_INV <<
+- CS42L42_ASP_LCPOL_IN_SHIFT;
+- asp_cfg_val |= CS42L42_ASP_POL_INV <<
+- CS42L42_ASP_SCPOL_IN_DAC_SHIFT;
++ asp_cfg_val |= CS42L42_ASP_LCPOL_INV << CS42L42_ASP_LCPOL_SHIFT;
+ break;
+ }
+
+- snd_soc_component_update_bits(component, CS42L42_ASP_CLK_CFG,
+- CS42L42_ASP_MODE_MASK |
+- CS42L42_ASP_SCPOL_IN_DAC_MASK |
+- CS42L42_ASP_LCPOL_IN_MASK, asp_cfg_val);
++ snd_soc_component_update_bits(component, CS42L42_ASP_CLK_CFG, CS42L42_ASP_MODE_MASK |
++ CS42L42_ASP_SCPOL_MASK |
++ CS42L42_ASP_LCPOL_MASK,
++ asp_cfg_val);
+
+ return 0;
+ }
+@@ -828,14 +806,29 @@ static int cs42l42_pcm_hw_params(struct snd_pcm_substream *substream,
+ {
+ struct snd_soc_component *component = dai->component;
+ struct cs42l42_private *cs42l42 = snd_soc_component_get_drvdata(component);
+- int retval;
++ unsigned int width = (params_width(params) / 8) - 1;
++ unsigned int val = 0;
+
+ cs42l42->srate = params_rate(params);
+- cs42l42->swidth = params_width(params);
+
+- retval = cs42l42_pll_config(component);
++ switch(substream->stream) {
++ case SNDRV_PCM_STREAM_PLAYBACK:
++ val |= width << CS42L42_ASP_RX_CH_RES_SHIFT;
++ /* channel 1 on low LRCLK */
++ snd_soc_component_update_bits(component, CS42L42_ASP_RX_DAI0_CH1_AP_RES,
++ CS42L42_ASP_RX_CH_AP_MASK |
++ CS42L42_ASP_RX_CH_RES_MASK, val);
++ /* Channel 2 on high LRCLK */
++ val |= CS42L42_ASP_RX_CH_AP_HI << CS42L42_ASP_RX_CH_AP_SHIFT;
++ snd_soc_component_update_bits(component, CS42L42_ASP_RX_DAI0_CH2_AP_RES,
++ CS42L42_ASP_RX_CH_AP_MASK |
++ CS42L42_ASP_RX_CH_RES_MASK, val);
++ break;
++ default:
++ break;
++ }
+
+- return retval;
++ return cs42l42_pll_config(component);
+ }
+
+ static int cs42l42_set_sysclk(struct snd_soc_dai *dai,
+@@ -900,9 +893,9 @@ static int cs42l42_mute(struct snd_soc_dai *dai, int mute, int direction)
+ return 0;
+ }
+
+-#define CS42L42_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S18_3LE | \
+- SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE | \
+- SNDRV_PCM_FMTBIT_S32_LE)
++#define CS42L42_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
++ SNDRV_PCM_FMTBIT_S24_LE |\
++ SNDRV_PCM_FMTBIT_S32_LE )
+
+
+ static const struct snd_soc_dai_ops cs42l42_ops = {
+@@ -1801,7 +1794,7 @@ static int cs42l42_i2c_probe(struct i2c_client *i2c_client,
+ dev_dbg(&i2c_client->dev, "Found reset GPIO\n");
+ gpiod_set_value_cansleep(cs42l42->reset_gpio, 1);
+ }
+- mdelay(3);
++ usleep_range(CS42L42_BOOT_TIME_US, CS42L42_BOOT_TIME_US * 2);
+
+ /* Request IRQ */
+ ret = devm_request_threaded_irq(&i2c_client->dev,
+@@ -1926,6 +1919,7 @@ static int cs42l42_runtime_resume(struct device *dev)
+ }
+
+ gpiod_set_value_cansleep(cs42l42->reset_gpio, 1);
++ usleep_range(CS42L42_BOOT_TIME_US, CS42L42_BOOT_TIME_US * 2);
+
+ regcache_cache_only(cs42l42->regmap, false);
+ regcache_sync(cs42l42->regmap);
+diff --git a/sound/soc/codecs/cs42l42.h b/sound/soc/codecs/cs42l42.h
+index 9e3cc528dcff0..866d7c873e3c9 100644
+--- a/sound/soc/codecs/cs42l42.h
++++ b/sound/soc/codecs/cs42l42.h
+@@ -258,11 +258,12 @@
+ #define CS42L42_ASP_SLAVE_MODE 0x00
+ #define CS42L42_ASP_MODE_SHIFT 4
+ #define CS42L42_ASP_MODE_MASK (1 << CS42L42_ASP_MODE_SHIFT)
+-#define CS42L42_ASP_SCPOL_IN_DAC_SHIFT 2
+-#define CS42L42_ASP_SCPOL_IN_DAC_MASK (1 << CS42L42_ASP_SCPOL_IN_DAC_SHIFT)
+-#define CS42L42_ASP_LCPOL_IN_SHIFT 0
+-#define CS42L42_ASP_LCPOL_IN_MASK (1 << CS42L42_ASP_LCPOL_IN_SHIFT)
+-#define CS42L42_ASP_POL_INV 1
++#define CS42L42_ASP_SCPOL_SHIFT 2
++#define CS42L42_ASP_SCPOL_MASK (3 << CS42L42_ASP_SCPOL_SHIFT)
++#define CS42L42_ASP_SCPOL_NOR 3
++#define CS42L42_ASP_LCPOL_SHIFT 0
++#define CS42L42_ASP_LCPOL_MASK (3 << CS42L42_ASP_LCPOL_SHIFT)
++#define CS42L42_ASP_LCPOL_INV 3
+
+ #define CS42L42_ASP_FRM_CFG (CS42L42_PAGE_12 + 0x08)
+ #define CS42L42_ASP_STP_SHIFT 4
+@@ -739,6 +740,7 @@
+ #define CS42L42_FRAC2_VAL(val) (((val) & 0xff0000) >> 16)
+
+ #define CS42L42_NUM_SUPPLIES 5
++#define CS42L42_BOOT_TIME_US 3000
+
+ static const char *const cs42l42_supply_names[CS42L42_NUM_SUPPLIES] = {
+ "VA",
+@@ -756,7 +758,6 @@ struct cs42l42_private {
+ struct completion pdn_done;
+ u32 sclk;
+ u32 srate;
+- u32 swidth;
+ u8 plug_state;
+ u8 hs_type;
+ u8 ts_inv;
+diff --git a/sound/soc/codecs/es8316.c b/sound/soc/codecs/es8316.c
+index f9ec5cf825991..ec2f11ff8a84d 100644
+--- a/sound/soc/codecs/es8316.c
++++ b/sound/soc/codecs/es8316.c
+@@ -63,13 +63,8 @@ static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(adc_pga_gain_tlv,
+ 1, 1, TLV_DB_SCALE_ITEM(0, 0, 0),
+ 2, 2, TLV_DB_SCALE_ITEM(250, 0, 0),
+ 3, 3, TLV_DB_SCALE_ITEM(450, 0, 0),
+- 4, 4, TLV_DB_SCALE_ITEM(700, 0, 0),
+- 5, 5, TLV_DB_SCALE_ITEM(1000, 0, 0),
+- 6, 6, TLV_DB_SCALE_ITEM(1300, 0, 0),
+- 7, 7, TLV_DB_SCALE_ITEM(1600, 0, 0),
+- 8, 8, TLV_DB_SCALE_ITEM(1800, 0, 0),
+- 9, 9, TLV_DB_SCALE_ITEM(2100, 0, 0),
+- 10, 10, TLV_DB_SCALE_ITEM(2400, 0, 0),
++ 4, 7, TLV_DB_SCALE_ITEM(700, 300, 0),
++ 8, 10, TLV_DB_SCALE_ITEM(1800, 300, 0),
+ );
+
+ static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(hpout_vol_tlv,
+diff --git a/sound/soc/codecs/rt1015.c b/sound/soc/codecs/rt1015.c
+index 32e6bcf763d1d..4607039a16e7f 100644
+--- a/sound/soc/codecs/rt1015.c
++++ b/sound/soc/codecs/rt1015.c
+@@ -209,6 +209,7 @@ static bool rt1015_volatile_register(struct device *dev, unsigned int reg)
+ case RT1015_VENDOR_ID:
+ case RT1015_DEVICE_ID:
+ case RT1015_PRO_ALT:
++ case RT1015_MAN_I2C:
+ case RT1015_DAC3:
+ case RT1015_VBAT_TEST_OUT1:
+ case RT1015_VBAT_TEST_OUT2:
+diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
+index 1414ad15d01cf..a5674c227b3a6 100644
+--- a/sound/soc/codecs/rt5640.c
++++ b/sound/soc/codecs/rt5640.c
+@@ -339,9 +339,9 @@ static bool rt5640_readable_register(struct device *dev, unsigned int reg)
+ }
+
+ static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -4650, 150, 0);
+-static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0);
++static const DECLARE_TLV_DB_MINMAX(dac_vol_tlv, -6562, 0);
+ static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0);
+-static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0);
++static const DECLARE_TLV_DB_MINMAX(adc_vol_tlv, -1762, 3000);
+ static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
+
+ /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
+diff --git a/sound/soc/codecs/rt5651.c b/sound/soc/codecs/rt5651.c
+index d198e191fb0c9..e59fdc81dbd45 100644
+--- a/sound/soc/codecs/rt5651.c
++++ b/sound/soc/codecs/rt5651.c
+@@ -285,9 +285,9 @@ static bool rt5651_readable_register(struct device *dev, unsigned int reg)
+ }
+
+ static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -4650, 150, 0);
+-static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0);
++static const DECLARE_TLV_DB_MINMAX(dac_vol_tlv, -6562, 0);
+ static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0);
+-static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0);
++static const DECLARE_TLV_DB_MINMAX(adc_vol_tlv, -1762, 3000);
+ static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
+
+ /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
+diff --git a/sound/soc/codecs/rt5659.c b/sound/soc/codecs/rt5659.c
+index 41e5917b16a5e..91a4ef7f620ca 100644
+--- a/sound/soc/codecs/rt5659.c
++++ b/sound/soc/codecs/rt5659.c
+@@ -3426,12 +3426,17 @@ static int rt5659_set_component_sysclk(struct snd_soc_component *component, int
+ {
+ struct rt5659_priv *rt5659 = snd_soc_component_get_drvdata(component);
+ unsigned int reg_val = 0;
++ int ret;
+
+ if (freq == rt5659->sysclk && clk_id == rt5659->sysclk_src)
+ return 0;
+
+ switch (clk_id) {
+ case RT5659_SCLK_S_MCLK:
++ ret = clk_set_rate(rt5659->mclk, freq);
++ if (ret)
++ return ret;
++
+ reg_val |= RT5659_SCLK_SRC_MCLK;
+ break;
+ case RT5659_SCLK_S_PLL1:
+diff --git a/sound/soc/codecs/rt711.c b/sound/soc/codecs/rt711.c
+index 85f744184a60f..047f4e677d78c 100644
+--- a/sound/soc/codecs/rt711.c
++++ b/sound/soc/codecs/rt711.c
+@@ -895,6 +895,13 @@ static int rt711_probe(struct snd_soc_component *component)
+ return 0;
+ }
+
++static void rt711_remove(struct snd_soc_component *component)
++{
++ struct rt711_priv *rt711 = snd_soc_component_get_drvdata(component);
++
++ regcache_cache_only(rt711->regmap, true);
++}
++
+ static const struct snd_soc_component_driver soc_codec_dev_rt711 = {
+ .probe = rt711_probe,
+ .set_bias_level = rt711_set_bias_level,
+@@ -905,6 +912,7 @@ static const struct snd_soc_component_driver soc_codec_dev_rt711 = {
+ .dapm_routes = rt711_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(rt711_audio_map),
+ .set_jack = rt711_set_jack_detect,
++ .remove = rt711_remove,
+ };
+
+ static int rt711_set_sdw_stream(struct snd_soc_dai *dai, void *sdw_stream,
+diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
+index 4d6ff81146228..4c0e87e22b97b 100644
+--- a/sound/soc/codecs/sgtl5000.c
++++ b/sound/soc/codecs/sgtl5000.c
+@@ -71,7 +71,7 @@ static const struct reg_default sgtl5000_reg_defaults[] = {
+ { SGTL5000_DAP_EQ_BASS_BAND4, 0x002f },
+ { SGTL5000_DAP_MAIN_CHAN, 0x8000 },
+ { SGTL5000_DAP_MIX_CHAN, 0x0000 },
+- { SGTL5000_DAP_AVC_CTRL, 0x0510 },
++ { SGTL5000_DAP_AVC_CTRL, 0x5100 },
+ { SGTL5000_DAP_AVC_THRESHOLD, 0x1473 },
+ { SGTL5000_DAP_AVC_ATTACK, 0x0028 },
+ { SGTL5000_DAP_AVC_DECAY, 0x0050 },
+diff --git a/sound/soc/mediatek/mt8192/mt8192-dai-tdm.c b/sound/soc/mediatek/mt8192/mt8192-dai-tdm.c
+index 8383536b7ae00..504293de2c0d0 100644
+--- a/sound/soc/mediatek/mt8192/mt8192-dai-tdm.c
++++ b/sound/soc/mediatek/mt8192/mt8192-dai-tdm.c
+@@ -555,7 +555,9 @@ static int mtk_dai_tdm_hw_params(struct snd_pcm_substream *substream,
+
+ /* set tdm */
+ if (tdm_priv->bck_invert)
+- tdm_con |= 1 << BCK_INVERSE_SFT;
++ regmap_update_bits(afe->regmap, AUDIO_TOP_CON3,
++ BCK_INVERSE_MASK_SFT,
++ 0x1 << BCK_INVERSE_SFT);
+
+ if (tdm_priv->lck_invert)
+ tdm_con |= 1 << LRCK_INVERSE_SFT;
+diff --git a/sound/soc/mediatek/mt8192/mt8192-reg.h b/sound/soc/mediatek/mt8192/mt8192-reg.h
+index 562f25c79c349..b9fb80d4afecd 100644
+--- a/sound/soc/mediatek/mt8192/mt8192-reg.h
++++ b/sound/soc/mediatek/mt8192/mt8192-reg.h
+@@ -21,6 +21,11 @@ enum {
+ /*****************************************************************************
+ * R E G I S T E R D E F I N I T I O N
+ *****************************************************************************/
++/* AUDIO_TOP_CON3 */
++#define BCK_INVERSE_SFT 3
++#define BCK_INVERSE_MASK 0x1
++#define BCK_INVERSE_MASK_SFT (0x1 << 3)
++
+ /* AFE_DAC_CON0 */
+ #define VUL12_ON_SFT 31
+ #define VUL12_ON_MASK 0x1
+@@ -2079,9 +2084,6 @@ enum {
+ #define TDM_EN_SFT 0
+ #define TDM_EN_MASK 0x1
+ #define TDM_EN_MASK_SFT (0x1 << 0)
+-#define BCK_INVERSE_SFT 1
+-#define BCK_INVERSE_MASK 0x1
+-#define BCK_INVERSE_MASK_SFT (0x1 << 1)
+ #define LRCK_INVERSE_SFT 2
+ #define LRCK_INVERSE_MASK 0x1
+ #define LRCK_INVERSE_MASK_SFT (0x1 << 2)
+diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
+index f6d4e99b590c7..0cffc9527e289 100644
+--- a/sound/soc/soc-core.c
++++ b/sound/soc/soc-core.c
+@@ -31,6 +31,7 @@
+ #include <linux/of.h>
+ #include <linux/of_graph.h>
+ #include <linux/dmi.h>
++#include <linux/acpi.h>
+ #include <sound/core.h>
+ #include <sound/pcm.h>
+ #include <sound/pcm_params.h>
+@@ -1573,6 +1574,9 @@ int snd_soc_set_dmi_name(struct snd_soc_card *card, const char *flavour)
+ if (card->long_name)
+ return 0; /* long name already set by driver or from DMI */
+
++ if (!is_acpi_device_node(card->dev->fwnode))
++ return 0;
++
+ /* make up dmi long name as: vendor-product-version-board */
+ vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
+ if (!vendor || !is_dmi_valid(vendor)) {
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index d3001fb18141f..176437a441e6c 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1521,6 +1521,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+ case USB_ID(0x21b4, 0x0081): /* AudioQuest DragonFly */
+ case USB_ID(0x2912, 0x30c8): /* Audioengine D1 */
+ case USB_ID(0x413c, 0xa506): /* Dell AE515 sound bar */
++ case USB_ID(0x046d, 0x084c): /* Logitech ConferenceCam Connect */
+ return true;
+ }
+
+diff --git a/tools/testing/selftests/net/forwarding/tc_flower.sh b/tools/testing/selftests/net/forwarding/tc_flower.sh
+index 058c746ee3006..b11d8e6b5bc14 100755
+--- a/tools/testing/selftests/net/forwarding/tc_flower.sh
++++ b/tools/testing/selftests/net/forwarding/tc_flower.sh
+@@ -3,7 +3,7 @@
+
+ ALL_TESTS="match_dst_mac_test match_src_mac_test match_dst_ip_test \
+ match_src_ip_test match_ip_flags_test match_pcp_test match_vlan_test \
+- match_ip_tos_test match_indev_test"
++ match_ip_tos_test match_indev_test match_ip_ttl_test"
+ NUM_NETIFS=2
+ source tc_common.sh
+ source lib.sh
+@@ -310,6 +310,42 @@ match_ip_tos_test()
+ log_test "ip_tos match ($tcflags)"
+ }
+
++match_ip_ttl_test()
++{
++ RET=0
++
++ tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \
++ $tcflags dst_ip 192.0.2.2 ip_ttl 63 action drop
++ tc filter add dev $h2 ingress protocol ip pref 2 handle 102 flower \
++ $tcflags dst_ip 192.0.2.2 action drop
++
++ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
++ -t ip "ttl=63" -q
++
++ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
++ -t ip "ttl=63,mf,frag=256" -q
++
++ tc_check_packets "dev $h2 ingress" 102 1
++ check_fail $? "Matched on the wrong filter (no check on ttl)"
++
++ tc_check_packets "dev $h2 ingress" 101 2
++ check_err $? "Did not match on correct filter (ttl=63)"
++
++ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
++ -t ip "ttl=255" -q
++
++ tc_check_packets "dev $h2 ingress" 101 3
++ check_fail $? "Matched on a wrong filter (ttl=63)"
++
++ tc_check_packets "dev $h2 ingress" 102 1
++ check_err $? "Did not match on correct filter (no check on ttl)"
++
++ tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower
++ tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
++
++ log_test "ip_ttl match ($tcflags)"
++}
++
+ match_indev_test()
+ {
+ RET=0