author     Mike Pagano <mpagano@gentoo.org>  2021-03-11 10:09:28 -0500
committer  Mike Pagano <mpagano@gentoo.org>  2021-03-11 10:09:28 -0500
commit     73edef365ba7c6ee139a8fc6839ac2bf818f38bd (patch)
tree       f9db0e201abd72e888018905357745c83aef9afd
parent     Linux patch 5.11.5 (diff)
download   linux-patches-73edef365ba7c6ee139a8fc6839ac2bf818f38bd.tar.gz
           linux-patches-73edef365ba7c6ee139a8fc6839ac2bf818f38bd.tar.bz2
           linux-patches-73edef365ba7c6ee139a8fc6839ac2bf818f38bd.zip

Linux patch 5.11.6 (tag: 5.11-7)

Signed-off-by: Mike Pagano <mpagano@gentoo.org>

-rw-r--r--  0000_README             |    4
-rw-r--r--  1005_linux-5.11.6.patch | 2204
2 files changed, 2208 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index e8533bff..4b555a51 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch: 1004_linux-5.11.5.patch
From: http://www.kernel.org
Desc: Linux 5.11.5
+Patch: 1005_linux-5.11.6.patch
+From: http://www.kernel.org
+Desc: Linux 5.11.6
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1005_linux-5.11.6.patch b/1005_linux-5.11.6.patch
new file mode 100644
index 00000000..a69ad3b4
--- /dev/null
+++ b/1005_linux-5.11.6.patch
@@ -0,0 +1,2204 @@
+diff --git a/Makefile b/Makefile
+index 1673c12fb4b35..472136a7881e6 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 11
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = 💕 Valentine's Day Edition 💕
+
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 3dfb25afa616f..e42da99db91fc 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -952,8 +952,9 @@ choice
+ that is selected here.
+
+ config CPU_BIG_ENDIAN
+- bool "Build big-endian kernel"
+- help
++ bool "Build big-endian kernel"
++ depends on !LD_IS_LLD || LLD_VERSION >= 130000
++ help
+ Say Y if you plan on running a kernel with a big-endian userspace.
+
+ config CPU_LITTLE_ENDIAN
+diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
+index 278462186ac47..22cf9da1e4a77 100644
+--- a/arch/parisc/Kconfig
++++ b/arch/parisc/Kconfig
+@@ -201,9 +201,12 @@ config PREFETCH
+ def_bool y
+ depends on PA8X00 || PA7200
+
++config PARISC_HUGE_KERNEL
++ def_bool y if !MODULES || UBSAN || FTRACE || COMPILE_TEST
++
+ config MLONGCALLS
+- def_bool y if !MODULES || UBSAN || FTRACE
+- bool "Enable the -mlong-calls compiler option for big kernels" if MODULES && !UBSAN && !FTRACE
++ def_bool y if PARISC_HUGE_KERNEL
++ bool "Enable the -mlong-calls compiler option for big kernels" if !PARISC_HUGE_KERNEL
+ depends on PA8X00
+ help
+ If you configure the kernel to include many drivers built-in instead
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 825ef6d281c98..6a0670548125f 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -1205,6 +1205,7 @@ static void init_vmcb(struct vcpu_svm *svm)
+ init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
+ init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
+
++ svm_set_cr4(&svm->vcpu, 0);
+ svm_set_efer(&svm->vcpu, 0);
+ save->dr6 = 0xffff0ff0;
+ kvm_set_rflags(&svm->vcpu, 2);
+diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
+index 9f0219a8cb985..dd7efafcb1034 100644
+--- a/drivers/acpi/acpica/acobject.h
++++ b/drivers/acpi/acpica/acobject.h
+@@ -284,6 +284,7 @@ struct acpi_object_addr_handler {
+ acpi_adr_space_handler handler;
+ struct acpi_namespace_node *node; /* Parent device */
+ void *context;
++ acpi_mutex context_mutex;
+ acpi_adr_space_setup setup;
+ union acpi_operand_object *region_list; /* Regions using this handler */
+ union acpi_operand_object *next;
+diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
+index 5884eba047f73..3438dc187efb6 100644
+--- a/drivers/acpi/acpica/evhandler.c
++++ b/drivers/acpi/acpica/evhandler.c
+@@ -489,6 +489,13 @@ acpi_ev_install_space_handler(struct acpi_namespace_node *node,
+
+ /* Init handler obj */
+
++ status =
++ acpi_os_create_mutex(&handler_obj->address_space.context_mutex);
++ if (ACPI_FAILURE(status)) {
++ acpi_ut_remove_reference(handler_obj);
++ goto unlock_and_exit;
++ }
++
+ handler_obj->address_space.space_id = (u8)space_id;
+ handler_obj->address_space.handler_flags = flags;
+ handler_obj->address_space.region_list = NULL;
+diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
+index a8a4c8c9b9efa..7701ae67e091a 100644
+--- a/drivers/acpi/acpica/evregion.c
++++ b/drivers/acpi/acpica/evregion.c
+@@ -112,6 +112,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+ union acpi_operand_object *region_obj2;
+ void *region_context = NULL;
+ struct acpi_connection_info *context;
++ acpi_mutex context_mutex;
++ u8 context_locked;
+ acpi_physical_address address;
+
+ ACPI_FUNCTION_TRACE(ev_address_space_dispatch);
+@@ -136,6 +138,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+ }
+
+ context = handler_desc->address_space.context;
++ context_mutex = handler_desc->address_space.context_mutex;
++ context_locked = FALSE;
+
+ /*
+ * It may be the case that the region has never been initialized.
+@@ -204,6 +208,23 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+ handler = handler_desc->address_space.handler;
+ address = (region_obj->region.address + region_offset);
+
++ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
++ "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
++ &region_obj->region.handler->address_space, handler,
++ ACPI_FORMAT_UINT64(address),
++ acpi_ut_get_region_name(region_obj->region.
++ space_id)));
++
++ if (!(handler_desc->address_space.handler_flags &
++ ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
++ /*
++ * For handlers other than the default (supplied) handlers, we must
++ * exit the interpreter because the handler *might* block -- we don't
++ * know what it will do, so we can't hold the lock on the interpreter.
++ */
++ acpi_ex_exit_interpreter();
++ }
++
+ /*
+ * Special handling for generic_serial_bus and general_purpose_io:
+ * There are three extra parameters that must be passed to the
+@@ -212,6 +233,11 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+ * 2) Length of the above buffer
+ * 3) Actual access length from the access_as() op
+ *
++ * Since we pass these extra parameters via the context, which is
++ * shared between threads, we must lock the context to avoid these
++ * parameters being changed from another thread before the handler
++ * has completed running.
++ *
+ * In addition, for general_purpose_io, the Address and bit_width fields
+ * are defined as follows:
+ * 1) Address is the pin number index of the field (bit offset from
+@@ -221,6 +247,14 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+ if ((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) &&
+ context && field_obj) {
+
++ status =
++ acpi_os_acquire_mutex(context_mutex, ACPI_WAIT_FOREVER);
++ if (ACPI_FAILURE(status)) {
++ goto re_enter_interpreter;
++ }
++
++ context_locked = TRUE;
++
+ /* Get the Connection (resource_template) buffer */
+
+ context->connection = field_obj->field.resource_buffer;
+@@ -230,6 +264,14 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+ if ((region_obj->region.space_id == ACPI_ADR_SPACE_GPIO) &&
+ context && field_obj) {
+
++ status =
++ acpi_os_acquire_mutex(context_mutex, ACPI_WAIT_FOREVER);
++ if (ACPI_FAILURE(status)) {
++ goto re_enter_interpreter;
++ }
++
++ context_locked = TRUE;
++
+ /* Get the Connection (resource_template) buffer */
+
+ context->connection = field_obj->field.resource_buffer;
+@@ -239,28 +281,15 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+ bit_width = field_obj->field.bit_length;
+ }
+
+- ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+- "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
+- &region_obj->region.handler->address_space, handler,
+- ACPI_FORMAT_UINT64(address),
+- acpi_ut_get_region_name(region_obj->region.
+- space_id)));
+-
+- if (!(handler_desc->address_space.handler_flags &
+- ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
+- /*
+- * For handlers other than the default (supplied) handlers, we must
+- * exit the interpreter because the handler *might* block -- we don't
+- * know what it will do, so we can't hold the lock on the interpreter.
+- */
+- acpi_ex_exit_interpreter();
+- }
+-
+ /* Call the handler */
+
+ status = handler(function, address, bit_width, value, context,
+ region_obj2->extra.region_context);
+
++ if (context_locked) {
++ acpi_os_release_mutex(context_mutex);
++ }
++
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Returned by Handler for [%s]",
+ acpi_ut_get_region_name(region_obj->region.
+@@ -277,6 +306,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+ }
+ }
+
++re_enter_interpreter:
+ if (!(handler_desc->address_space.handler_flags &
+ ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
+ /*
+diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
+index da97fd0c6b51e..3bb06f17a18b6 100644
+--- a/drivers/acpi/acpica/evxfregn.c
++++ b/drivers/acpi/acpica/evxfregn.c
+@@ -201,6 +201,8 @@ acpi_remove_address_space_handler(acpi_handle device,
+
+ /* Now we can delete the handler object */
+
++ acpi_os_release_mutex(handler_obj->address_space.
++ context_mutex);
+ acpi_ut_remove_reference(handler_obj);
+ goto unlock_and_exit;
+ }
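
The three ACPICA hunks above fix a race on the per-handler context: GSBUS and GPIO operation-region calls pass extra parameters to the handler through a context object shared between threads, so the patch adds a context_mutex that is created with the handler, held across the handler call, and destroyed when the handler is removed. A minimal user-space analogue of that pattern, using POSIX threads in place of the acpi_os_* mutex primitives (the names dispatch and handler_ctx are illustrative, not ACPICA API):

/* Sketch: serialize writes to a shared handler context, as the
 * evregion.c change does. Compile with -lpthread. */
#include <pthread.h>
#include <stdio.h>

struct handler_ctx {
    pthread_mutex_t lock;   /* plays the role of context_mutex */
    int connection;         /* per-call parameter passed via shared state */
};

static int handler(int function, int connection)
{
    /* The real handler may block; the context must stay stable meanwhile. */
    printf("fn=%d conn=%d\n", function, connection);
    return 0;
}

static int dispatch(struct handler_ctx *ctx, int function, int connection)
{
    int ret;

    /* Lock before publishing call-specific fields into the shared context. */
    pthread_mutex_lock(&ctx->lock);
    ctx->connection = connection;
    ret = handler(function, ctx->connection);
    pthread_mutex_unlock(&ctx->lock);   /* matches acpi_os_release_mutex() */
    return ret;
}

int main(void)
{
    struct handler_ctx ctx = { .lock = PTHREAD_MUTEX_INITIALIZER };
    dispatch(&ctx, 1, 42);
    return 0;
}

Without the lock, a second thread could overwrite ctx->connection between the assignment and the handler call, which is exactly the window the kernel change closes.
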
+diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+index a5af223eaf501..81506d2539b07 100644
+--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+@@ -626,8 +626,6 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
+ if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
+ gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
+
+- gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100);
+-
+ /* Enable USE_RETENTION_FLOPS */
+ gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);
+
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 5ba0aa1d23353..b60279aaed438 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -641,6 +641,8 @@
+ #define USB_DEVICE_ID_INNEX_GENESIS_ATARI 0x4745
+
+ #define USB_VENDOR_ID_ITE 0x048d
++#define I2C_VENDOR_ID_ITE 0x103c
++#define I2C_DEVICE_ID_ITE_VOYO_WINPAD_A15 0x184f
+ #define USB_DEVICE_ID_ITE_LENOVO_YOGA 0x8386
+ #define USB_DEVICE_ID_ITE_LENOVO_YOGA2 0x8350
+ #define I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720 0x837a
+diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
+index 22bfbebceaf44..14fc068affada 100644
+--- a/drivers/hid/hid-ite.c
++++ b/drivers/hid/hid-ite.c
+@@ -23,11 +23,16 @@ static __u8 *ite_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int
+ hid_info(hdev, "Fixing up Acer Sw5-012 ITE keyboard report descriptor\n");
+ rdesc[163] = HID_MAIN_ITEM_RELATIVE;
+ }
+- /* For Acer One S1002 keyboard-dock */
++ /* For Acer One S1002/S1003 keyboard-dock */
+ if (*rsize == 188 && rdesc[185] == 0x81 && rdesc[186] == 0x02) {
+- hid_info(hdev, "Fixing up Acer S1002 ITE keyboard report descriptor\n");
++ hid_info(hdev, "Fixing up Acer S1002/S1003 ITE keyboard report descriptor\n");
+ rdesc[186] = HID_MAIN_ITEM_RELATIVE;
+ }
++ /* For Acer Aspire Switch 10E (SW3-016) keyboard-dock */
++ if (*rsize == 210 && rdesc[184] == 0x81 && rdesc[185] == 0x02) {
++ hid_info(hdev, "Fixing up Acer Aspire Switch 10E (SW3-016) ITE keyboard report descriptor\n");
++ rdesc[185] = HID_MAIN_ITEM_RELATIVE;
++ }
+ }
+
+ return rdesc;
+@@ -114,7 +119,8 @@ static const struct hid_device_id ite_devices[] = {
+ /* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_SYNAPTICS,
+- USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003) },
++ USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003),
++ .driver_data = QUIRK_TOUCHPAD_ON_OFF_REPORT },
+ { }
+ };
+ MODULE_DEVICE_TABLE(hid, ite_devices);
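
The hid-ite fixup follows a common HID pattern: match the exact report-descriptor size and the bytes at a known offset, then patch a single byte so a misdeclared absolute field becomes relative. A stand-alone sketch of that check-then-patch shape, assuming the SW3-016 layout shown above (HID_MAIN_ITEM_RELATIVE is taken to be 0x04, its value in include/linux/hid.h):

/* Sketch of the report-descriptor fixup pattern: verify the size and
 * the bytes around the patch point, then rewrite one byte in place. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define HID_MAIN_ITEM_RELATIVE 0x04   /* per include/linux/hid.h */

static uint8_t *ite_style_fixup(uint8_t *rdesc, size_t *rsize)
{
    /* Only touch descriptors that match the known-broken layout. */
    if (*rsize == 210 && rdesc[184] == 0x81 && rdesc[185] == 0x02) {
        fprintf(stderr, "fixing up keyboard report descriptor\n");
        rdesc[185] = HID_MAIN_ITEM_RELATIVE;
    }
    return rdesc;
}

int main(void)
{
    uint8_t desc[210] = {0};
    size_t size = sizeof(desc);

    desc[184] = 0x81;   /* Input item tag */
    desc[185] = 0x02;   /* Data,Var,Abs: the byte the quirk rewrites */

    ite_style_fixup(desc, &size);
    printf("byte 185 is now 0x%02x\n", desc[185]);
    return 0;
}
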
+diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
+index bfe716d7ea441..c586acf2fc0b4 100644
+--- a/drivers/hid/i2c-hid/i2c-hid-core.c
++++ b/drivers/hid/i2c-hid/i2c-hid-core.c
+@@ -171,6 +171,8 @@ static const struct i2c_hid_quirks {
+ I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
+ { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
+ I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
++ { I2C_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_VOYO_WINPAD_A15,
++ I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
+ { I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_3118,
+ I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
+ { USB_VENDOR_ID_ELAN, HID_ANY_ID,
+diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
+index f0adbc48fd179..9256f84f5ebf1 100644
+--- a/drivers/iommu/amd/iommu.c
++++ b/drivers/iommu/amd/iommu.c
+@@ -1502,6 +1502,10 @@ static bool increase_address_space(struct protection_domain *domain,
+ bool ret = true;
+ u64 *pte;
+
++ pte = (void *)get_zeroed_page(gfp);
++ if (!pte)
++ return false;
++
+ spin_lock_irqsave(&domain->lock, flags);
+
+ amd_iommu_domain_get_pgtable(domain, &pgtable);
+@@ -1513,10 +1517,6 @@ static bool increase_address_space(struct protection_domain *domain,
+ if (WARN_ON_ONCE(pgtable.mode == PAGE_MODE_6_LEVEL))
+ goto out;
+
+- pte = (void *)get_zeroed_page(gfp);
+- if (!pte)
+- goto out;
+-
+ *pte = PM_LEVEL_PDE(pgtable.mode, iommu_virt_to_phys(pgtable.root));
+
+ pgtable.root = pte;
+@@ -1530,10 +1530,12 @@ static bool increase_address_space(struct protection_domain *domain,
+ */
+ amd_iommu_domain_set_pgtable(domain, pte, pgtable.mode);
+
++ pte = NULL;
+ ret = true;
+
+ out:
+ spin_unlock_irqrestore(&domain->lock, flags);
++ free_page((unsigned long)pte);
+
+ return ret;
+ }
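
The AMD IOMMU hunk is a lock-ordering fix: get_zeroed_page() may sleep, so it cannot run under the domain->lock spinlock. The patch allocates first, publishes the page while holding the lock, and frees it afterwards if another CPU won the race. A user-space sketch of the same allocate, publish-or-discard shape, with a pthread mutex standing in for the spinlock:

/* Sketch: allocate before taking the lock, publish the allocation
 * under the lock, and free it after unlock if it went unused. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct level { struct level *down; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct level *root;
static int levels;

static bool increase_levels(int max)
{
    bool ret = false;
    /* get_zeroed_page(gfp) analogue: may sleep, so do it unlocked. */
    struct level *pte = calloc(1, sizeof(*pte));

    if (!pte)
        return false;

    pthread_mutex_lock(&lock);
    if (levels >= max)
        goto out;         /* raced with another grower: pte stays unused */

    pte->down = root;     /* link the old top level under the new one */
    root = pte;
    levels++;
    pte = NULL;           /* consumed: the free below becomes a no-op */
    ret = true;
out:
    pthread_mutex_unlock(&lock);
    free(pte);
    return ret;
}

int main(void)
{
    printf("%d %d\n", increase_levels(6), increase_levels(6));
    return 0;
}
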
+diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
+index d92c4d2c521a3..6e5f544c9c737 100644
+--- a/drivers/misc/eeprom/eeprom_93xx46.c
++++ b/drivers/misc/eeprom/eeprom_93xx46.c
+@@ -35,6 +35,10 @@ static const struct eeprom_93xx46_devtype_data atmel_at93c46d_data = {
+ EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH,
+ };
+
++static const struct eeprom_93xx46_devtype_data microchip_93lc46b_data = {
++ .quirks = EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE,
++};
++
+ struct eeprom_93xx46_dev {
+ struct spi_device *spi;
+ struct eeprom_93xx46_platform_data *pdata;
+@@ -55,6 +59,11 @@ static inline bool has_quirk_instruction_length(struct eeprom_93xx46_dev *edev)
+ return edev->pdata->quirks & EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH;
+ }
+
++static inline bool has_quirk_extra_read_cycle(struct eeprom_93xx46_dev *edev)
++{
++ return edev->pdata->quirks & EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE;
++}
++
+ static int eeprom_93xx46_read(void *priv, unsigned int off,
+ void *val, size_t count)
+ {
+@@ -96,6 +105,11 @@ static int eeprom_93xx46_read(void *priv, unsigned int off,
+ dev_dbg(&edev->spi->dev, "read cmd 0x%x, %d Hz\n",
+ cmd_addr, edev->spi->max_speed_hz);
+
++ if (has_quirk_extra_read_cycle(edev)) {
++ cmd_addr <<= 1;
++ bits += 1;
++ }
++
+ spi_message_init(&m);
+
+ t[0].tx_buf = (char *)&cmd_addr;
+@@ -363,6 +377,7 @@ static void select_deassert(void *context)
+ static const struct of_device_id eeprom_93xx46_of_table[] = {
+ { .compatible = "eeprom-93xx46", },
+ { .compatible = "atmel,at93c46d", .data = &atmel_at93c46d_data, },
++ { .compatible = "microchip,93lc46b", .data = &microchip_93lc46b_data, },
+ {}
+ };
+ MODULE_DEVICE_TABLE(of, eeprom_93xx46_of_table);
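
The new EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE quirk covers parts such as the Microchip 93LC46B, which clock out a dummy bit before read data; the driver compensates by shifting the command word left one position and lengthening the transfer by one bit. A pure-function sketch of that command-word adjustment (the start-bit/opcode layout below is illustrative, not copied from the driver):

/* Sketch: build a 93xx46-style READ command word, optionally adding
 * the extra (dummy) clock cycle some parts require. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cmd { uint16_t word; unsigned int bits; };

static struct cmd read_cmd(unsigned int addr, unsigned int addrlen,
                           bool extra_cycle)
{
    struct cmd c;

    c.word = (0x6 << addrlen) | addr;   /* start bit + READ opcode + addr */
    c.bits = addrlen + 3;

    if (extra_cycle) {                  /* quirk: one extra clock cycle */
        c.word <<= 1;
        c.bits += 1;
    }
    return c;
}

int main(void)
{
    struct cmd c = read_cmd(0x12, 6, true);
    printf("cmd=0x%04x bits=%u\n", c.word, c.bits);
    return 0;
}
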
+diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
+index d90020ed36227..59d8d96ce206b 100644
+--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
++++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
+@@ -112,6 +112,7 @@ static const struct sdhci_ops sdhci_dwcmshc_ops = {
+ static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = {
+ .ops = &sdhci_dwcmshc_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
++ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ };
+
+ static int dwcmshc_probe(struct platform_device *pdev)
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 6bad4d4dcdf07..806a5d071ef65 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3230,7 +3230,8 @@ static const struct pci_device_id nvme_id_table[] = {
+ { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
+ .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
+ { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
+- .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
++ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
++ NVME_QUIRK_NO_NS_DESC_LIST, },
+ { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
+ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+ { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */
+@@ -3244,6 +3245,9 @@ static const struct pci_device_id nvme_id_table[] = {
+ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
+ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
++ { PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */
++ .driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
++ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE(0x1d1d, 0x1f1f), /* LighNVM qemu device */
+ .driver_data = NVME_QUIRK_LIGHTNVM, },
+ { PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */
+@@ -3261,6 +3265,8 @@ static const struct pci_device_id nvme_id_table[] = {
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x1d97, 0x2263), /* SPCC */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
++ { PCI_DEVICE(0x2646, 0x2262), /* KINGSTON SKC2000 NVMe SSD */
++ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+ { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
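
The NVMe hunk grows the PCI ID quirk table: each entry pairs a (vendor, device) tuple with a bitmask of workarounds, and entries can OR several flags together, as the Seagate Nytro and Lexar entries above do. A compact sketch of the table-plus-lookup idiom (flag bits here are illustrative):

/* Sketch of a PCI-ID quirk table and its lookup: a linear scan that
 * returns the matched entry's workaround flags. */
#include <stdint.h>
#include <stdio.h>

#define QUIRK_NO_NS_DESC_LIST   (1u << 0)   /* illustrative flag bits */
#define QUIRK_IGNORE_DEV_SUBNQN (1u << 1)
#define QUIRK_NO_DEEPEST_PS     (1u << 2)

struct id_quirk { uint16_t vendor, device; uint32_t flags; };

static const struct id_quirk table[] = {
    { 0x1b4b, 0x1092, QUIRK_NO_NS_DESC_LIST | QUIRK_IGNORE_DEV_SUBNQN },
    { 0x2646, 0x2262, QUIRK_NO_DEEPEST_PS },   /* Kingston SKC2000 */
    { 0 }                                      /* sentinel */
};

static uint32_t lookup_quirks(uint16_t vendor, uint16_t device)
{
    for (const struct id_quirk *q = table; q->vendor; q++)
        if (q->vendor == vendor && q->device == device)
            return q->flags;
    return 0;
}

int main(void)
{
    printf("quirks=0x%x\n", lookup_quirks(0x2646, 0x2262));
    return 0;
}
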
+diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
+index dac1ac8a76159..849f1e416ea57 100644
+--- a/drivers/pci/controller/cadence/pci-j721e.c
++++ b/drivers/pci/controller/cadence/pci-j721e.c
+@@ -64,6 +64,7 @@ enum j721e_pcie_mode {
+
+ struct j721e_pcie_data {
+ enum j721e_pcie_mode mode;
++ bool quirk_retrain_flag;
+ };
+
+ static inline u32 j721e_pcie_user_readl(struct j721e_pcie *pcie, u32 offset)
+@@ -280,6 +281,7 @@ static struct pci_ops cdns_ti_pcie_host_ops = {
+
+ static const struct j721e_pcie_data j721e_pcie_rc_data = {
+ .mode = PCI_MODE_RC,
++ .quirk_retrain_flag = true,
+ };
+
+ static const struct j721e_pcie_data j721e_pcie_ep_data = {
+@@ -388,6 +390,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
+
+ bridge->ops = &cdns_ti_pcie_host_ops;
+ rc = pci_host_bridge_priv(bridge);
++ rc->quirk_retrain_flag = data->quirk_retrain_flag;
+
+ cdns_pcie = &rc->pcie;
+ cdns_pcie->dev = dev;
+diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
+index 1cb7cfc75d6e4..73dcf8cf98fbf 100644
+--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
++++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
+@@ -77,6 +77,68 @@ static struct pci_ops cdns_pcie_host_ops = {
+ .write = pci_generic_config_write,
+ };
+
++static int cdns_pcie_host_wait_for_link(struct cdns_pcie *pcie)
++{
++ struct device *dev = pcie->dev;
++ int retries;
++
++ /* Check if the link is up or not */
++ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
++ if (cdns_pcie_link_up(pcie)) {
++ dev_info(dev, "Link up\n");
++ return 0;
++ }
++ usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
++ }
++
++ return -ETIMEDOUT;
++}
++
++static int cdns_pcie_retrain(struct cdns_pcie *pcie)
++{
++ u32 lnk_cap_sls, pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET;
++ u16 lnk_stat, lnk_ctl;
++ int ret = 0;
++
++ /*
++ * Set retrain bit if current speed is 2.5 GB/s,
++ * but the PCIe root port support is > 2.5 GB/s.
++ */
++
++ lnk_cap_sls = cdns_pcie_readl(pcie, (CDNS_PCIE_RP_BASE + pcie_cap_off +
++ PCI_EXP_LNKCAP));
++ if ((lnk_cap_sls & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
++ return ret;
++
++ lnk_stat = cdns_pcie_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA);
++ if ((lnk_stat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) {
++ lnk_ctl = cdns_pcie_rp_readw(pcie,
++ pcie_cap_off + PCI_EXP_LNKCTL);
++ lnk_ctl |= PCI_EXP_LNKCTL_RL;
++ cdns_pcie_rp_writew(pcie, pcie_cap_off + PCI_EXP_LNKCTL,
++ lnk_ctl);
++
++ ret = cdns_pcie_host_wait_for_link(pcie);
++ }
++ return ret;
++}
++
++static int cdns_pcie_host_start_link(struct cdns_pcie_rc *rc)
++{
++ struct cdns_pcie *pcie = &rc->pcie;
++ int ret;
++
++ ret = cdns_pcie_host_wait_for_link(pcie);
++
++ /*
++ * Retrain link for Gen2 training defect
++ * if quirk flag is set.
++ */
++ if (!ret && rc->quirk_retrain_flag)
++ ret = cdns_pcie_retrain(pcie);
++
++ return ret;
++}
+
+ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
+ {
+@@ -399,23 +461,6 @@ static int cdns_pcie_host_init(struct device *dev,
+ return cdns_pcie_host_init_address_translation(rc);
+ }
+
+-static int cdns_pcie_host_wait_for_link(struct cdns_pcie *pcie)
+-{
+- struct device *dev = pcie->dev;
+- int retries;
+-
+- /* Check if the link is up or not */
+- for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+- if (cdns_pcie_link_up(pcie)) {
+- dev_info(dev, "Link up\n");
+- return 0;
+- }
+- usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+- }
+-
+- return -ETIMEDOUT;
+-}
+-
+ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
+ {
+ struct device *dev = rc->pcie.dev;
+@@ -458,7 +503,7 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
+ return ret;
+ }
+
+- ret = cdns_pcie_host_wait_for_link(pcie);
++ ret = cdns_pcie_host_start_link(rc);
+ if (ret)
+ dev_dbg(dev, "PCIe link never came up\n");
+
+diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
+index 30eba6cafe2c1..254d2570f8c91 100644
+--- a/drivers/pci/controller/cadence/pcie-cadence.h
++++ b/drivers/pci/controller/cadence/pcie-cadence.h
+@@ -119,7 +119,7 @@
+ * Root Port Registers (PCI configuration space for the root port function)
+ */
+ #define CDNS_PCIE_RP_BASE 0x00200000
+-
++#define CDNS_PCIE_RP_CAP_OFFSET 0xc0
+
+ /*
+ * Address Translation Registers
+@@ -291,6 +291,7 @@ struct cdns_pcie {
+ * @device_id: PCI device ID
+ * @avail_ib_bar: Satus of RP_BAR0, RP_BAR1 and RP_NO_BAR if it's free or
+ * available
++ * @quirk_retrain_flag: Retrain link as quirk for PCIe Gen2
+ */
+ struct cdns_pcie_rc {
+ struct cdns_pcie pcie;
+@@ -299,6 +300,7 @@ struct cdns_pcie_rc {
+ u32 vendor_id;
+ u32 device_id;
+ bool avail_ib_bar[CDNS_PCIE_RP_MAX_IB];
++ bool quirk_retrain_flag;
+ };
+
+ /**
+@@ -414,6 +416,13 @@ static inline void cdns_pcie_rp_writew(struct cdns_pcie *pcie,
+ cdns_pcie_write_sz(addr, 0x2, value);
+ }
+
++static inline u16 cdns_pcie_rp_readw(struct cdns_pcie *pcie, u32 reg)
++{
++ void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;
++
++ return cdns_pcie_read_sz(addr, 0x2);
++}
++
+ /* Endpoint Function register access */
+ static inline void cdns_pcie_ep_fn_writeb(struct cdns_pcie *pcie, u8 fn,
+ u32 reg, u8 value)
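
The Cadence change retrains the link only when the root port supports more than 2.5 GT/s but the link came up at 2.5 GT/s, working around a Gen2 training defect on J721E. That gate is a pure function of two config-space registers; the sketch below reproduces it with the standard LNKCAP/LNKSTA field encodings from include/uapi/linux/pci_regs.h:

/* Sketch: decide whether a Gen2-capable port that linked up at
 * 2.5 GT/s needs a retrain, mirroring cdns_pcie_retrain() above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_LNKCAP_SLS       0x0000000f   /* supported link speeds */
#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001
#define PCI_EXP_LNKSTA_CLS       0x000f       /* current link speed */
#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001

static bool needs_retrain(uint32_t lnkcap, uint16_t lnksta)
{
    /* Port only supports 2.5 GT/s: nothing to retrain to. */
    if ((lnkcap & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
        return false;

    /* Capable of more, but currently linked at 2.5 GT/s: retrain. */
    return (lnksta & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB;
}

int main(void)
{
    printf("%d\n", needs_retrain(0x2 /* up to 5 GT/s */, 0x1 /* at 2.5 */));
    printf("%d\n", needs_retrain(0x1 /* 2.5 only */,     0x1));
    return 0;
}
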
+diff --git a/drivers/scsi/ufs/ufs-exynos.c b/drivers/scsi/ufs/ufs-exynos.c
+index a8770ff145883..267943a13a946 100644
+--- a/drivers/scsi/ufs/ufs-exynos.c
++++ b/drivers/scsi/ufs/ufs-exynos.c
+@@ -640,6 +640,11 @@ static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
+ }
+ }
+
++ /* setting for three timeout values for traffic class #0 */
++ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 8064);
++ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 28224);
++ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 20160);
++
+ return 0;
+ out:
+ return ret;
+@@ -1236,7 +1241,9 @@ struct exynos_ufs_drv_data exynos_ufs_drvs = {
+ UFSHCI_QUIRK_BROKEN_HCE |
+ UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
+ UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
+- UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL,
++ UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL |
++ UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING |
++ UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE,
+ .opts = EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL |
+ EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
+ EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX |
+diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
+index 80618af7c8720..c55202b92a43a 100644
+--- a/drivers/scsi/ufs/ufs-mediatek.c
++++ b/drivers/scsi/ufs/ufs-mediatek.c
+@@ -661,6 +661,7 @@ static int ufs_mtk_init(struct ufs_hba *hba)
+
+ /* Enable WriteBooster */
+ hba->caps |= UFSHCD_CAP_WB_EN;
++ hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
+ hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
+
+ if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index 728168cd18f55..428b9e0ac47e9 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -4220,25 +4220,27 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
+ pwr_mode->hs_rate);
+
+- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
+- DL_FC0ProtectionTimeOutVal_Default);
+- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
+- DL_TC0ReplayTimeOutVal_Default);
+- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
+- DL_AFC0ReqTimeOutVal_Default);
+- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
+- DL_FC1ProtectionTimeOutVal_Default);
+- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
+- DL_TC1ReplayTimeOutVal_Default);
+- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
+- DL_AFC1ReqTimeOutVal_Default);
+-
+- ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
+- DL_FC0ProtectionTimeOutVal_Default);
+- ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
+- DL_TC0ReplayTimeOutVal_Default);
+- ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
+- DL_AFC0ReqTimeOutVal_Default);
++ if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
++ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
++ DL_FC0ProtectionTimeOutVal_Default);
++ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
++ DL_TC0ReplayTimeOutVal_Default);
++ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
++ DL_AFC0ReqTimeOutVal_Default);
++ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
++ DL_FC1ProtectionTimeOutVal_Default);
++ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
++ DL_TC1ReplayTimeOutVal_Default);
++ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
++ DL_AFC1ReqTimeOutVal_Default);
++
++ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
++ DL_FC0ProtectionTimeOutVal_Default);
++ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
++ DL_TC0ReplayTimeOutVal_Default);
++ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
++ DL_AFC0ReqTimeOutVal_Default);
++ }
+
+ ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
+ | pwr_mode->pwr_tx);
+@@ -4829,6 +4831,8 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
+ struct request_queue *q = sdev->request_queue;
+
+ blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
++ if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
++ blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
+
+ if (ufshcd_is_rpm_autosuspend_allowed(hba))
+ sdev->rpm_autosuspend = 1;
+diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
+index aa9ea35523239..1885ec9126c44 100644
+--- a/drivers/scsi/ufs/ufshcd.h
++++ b/drivers/scsi/ufs/ufshcd.h
+@@ -551,6 +551,16 @@ enum ufshcd_quirks {
+ */
+ UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL = 1 << 12,
+
++ /*
++ * This quirk needs to disable unipro timeout values
++ * before power mode change
++ */
++ UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING = 1 << 13,
++
++ /*
++ * This quirk allows only sg entries aligned with page size.
++ */
++ UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE = 1 << 14,
+ };
+
+ enum ufshcd_caps {
+diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
+index ddad5d274ee81..7bd9291c8d5f5 100644
+--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
++++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
+@@ -34,56 +34,48 @@ static const struct cedrus_control cedrus_controls[] = {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS,
+ },
+ .codec = CEDRUS_CODEC_MPEG2,
+- .required = true,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION,
+ },
+ .codec = CEDRUS_CODEC_MPEG2,
+- .required = false,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_DECODE_PARAMS,
+ },
+ .codec = CEDRUS_CODEC_H264,
+- .required = true,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_SLICE_PARAMS,
+ },
+ .codec = CEDRUS_CODEC_H264,
+- .required = true,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_SPS,
+ },
+ .codec = CEDRUS_CODEC_H264,
+- .required = true,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_PPS,
+ },
+ .codec = CEDRUS_CODEC_H264,
+- .required = true,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_SCALING_MATRIX,
+ },
+ .codec = CEDRUS_CODEC_H264,
+- .required = false,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_PRED_WEIGHTS,
+ },
+ .codec = CEDRUS_CODEC_H264,
+- .required = false,
+ },
+ {
+ .cfg = {
+@@ -92,7 +84,6 @@ static const struct cedrus_control cedrus_controls[] = {
+ .def = V4L2_STATELESS_H264_DECODE_MODE_SLICE_BASED,
+ },
+ .codec = CEDRUS_CODEC_H264,
+- .required = false,
+ },
+ {
+ .cfg = {
+@@ -101,7 +92,6 @@ static const struct cedrus_control cedrus_controls[] = {
+ .def = V4L2_STATELESS_H264_START_CODE_NONE,
+ },
+ .codec = CEDRUS_CODEC_H264,
+- .required = false,
+ },
+ /*
+ * We only expose supported profiles information,
+@@ -120,28 +110,24 @@ static const struct cedrus_control cedrus_controls[] = {
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED),
+ },
+ .codec = CEDRUS_CODEC_H264,
+- .required = false,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_SPS,
+ },
+ .codec = CEDRUS_CODEC_H265,
+- .required = true,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_PPS,
+ },
+ .codec = CEDRUS_CODEC_H265,
+- .required = true,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS,
+ },
+ .codec = CEDRUS_CODEC_H265,
+- .required = true,
+ },
+ {
+ .cfg = {
+@@ -150,7 +136,6 @@ static const struct cedrus_control cedrus_controls[] = {
+ .def = V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED,
+ },
+ .codec = CEDRUS_CODEC_H265,
+- .required = false,
+ },
+ {
+ .cfg = {
+@@ -159,14 +144,12 @@ static const struct cedrus_control cedrus_controls[] = {
+ .def = V4L2_MPEG_VIDEO_HEVC_START_CODE_NONE,
+ },
+ .codec = CEDRUS_CODEC_H265,
+- .required = false,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_VP8_FRAME_HEADER,
+ },
+ .codec = CEDRUS_CODEC_VP8,
+- .required = true,
+ },
+ };
+
+@@ -227,12 +210,8 @@ static int cedrus_init_ctrls(struct cedrus_dev *dev, struct cedrus_ctx *ctx)
+ static int cedrus_request_validate(struct media_request *req)
+ {
+ struct media_request_object *obj;
+- struct v4l2_ctrl_handler *parent_hdl, *hdl;
+ struct cedrus_ctx *ctx = NULL;
+- struct v4l2_ctrl *ctrl_test;
+ unsigned int count;
+- unsigned int i;
+- int ret = 0;
+
+ list_for_each_entry(obj, &req->objects, list) {
+ struct vb2_buffer *vb;
+@@ -259,34 +238,6 @@ static int cedrus_request_validate(struct media_request *req)
+ return -EINVAL;
+ }
+
+- parent_hdl = &ctx->hdl;
+-
+- hdl = v4l2_ctrl_request_hdl_find(req, parent_hdl);
+- if (!hdl) {
+- v4l2_info(&ctx->dev->v4l2_dev, "Missing codec control(s)\n");
+- return -ENOENT;
+- }
+-
+- for (i = 0; i < CEDRUS_CONTROLS_COUNT; i++) {
+- if (cedrus_controls[i].codec != ctx->current_codec ||
+- !cedrus_controls[i].required)
+- continue;
+-
+- ctrl_test = v4l2_ctrl_request_hdl_ctrl_find(hdl,
+- cedrus_controls[i].cfg.id);
+- if (!ctrl_test) {
+- v4l2_info(&ctx->dev->v4l2_dev,
+- "Missing required codec control\n");
+- ret = -ENOENT;
+- break;
+- }
+- }
+-
+- v4l2_ctrl_request_hdl_put(hdl);
+-
+- if (ret)
+- return ret;
+-
+ return vb2_request_validate(req);
+ }
+
+diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h
+index c96077aaef493..251a6a6603516 100644
+--- a/drivers/staging/media/sunxi/cedrus/cedrus.h
++++ b/drivers/staging/media/sunxi/cedrus/cedrus.h
+@@ -56,7 +56,6 @@ enum cedrus_h264_pic_type {
+ struct cedrus_control {
+ struct v4l2_ctrl_config cfg;
+ enum cedrus_codec codec;
+- unsigned char required:1;
+ };
+
+ struct cedrus_h264_run {
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index f12e6a0aa3c70..a09912cf1852d 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -627,7 +627,8 @@ static int btrfs_delayed_inode_reserve_metadata(
+ */
+ if (!src_rsv || (!trans->bytes_reserved &&
+ src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
+- ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true);
++ ret = btrfs_qgroup_reserve_meta(root, num_bytes,
++ BTRFS_QGROUP_RSV_META_PREALLOC, true);
+ if (ret < 0)
+ return ret;
+ ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 40ccb8ddab23a..9b4f75568261e 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -5916,7 +5916,7 @@ static int btrfs_dirty_inode(struct inode *inode)
+ return PTR_ERR(trans);
+
+ ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+- if (ret && ret == -ENOSPC) {
++ if (ret && (ret == -ENOSPC || ret == -EDQUOT)) {
+ /* whoops, lets try again with the full transaction */
+ btrfs_end_transaction(trans);
+ trans = btrfs_start_transaction(root, 1);
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 808370ada8889..14ff388fd3bda 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -3841,8 +3841,8 @@ static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
+ return num_bytes;
+ }
+
+-static int qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
+- enum btrfs_qgroup_rsv_type type, bool enforce)
++int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
++ enum btrfs_qgroup_rsv_type type, bool enforce)
+ {
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ int ret;
+@@ -3873,14 +3873,14 @@ int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
+ {
+ int ret;
+
+- ret = qgroup_reserve_meta(root, num_bytes, type, enforce);
++ ret = btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
+ if (ret <= 0 && ret != -EDQUOT)
+ return ret;
+
+ ret = try_flush_qgroup(root);
+ if (ret < 0)
+ return ret;
+- return qgroup_reserve_meta(root, num_bytes, type, enforce);
++ return btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
+ }
+
+ void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
+diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
+index 50dea9a2d8fbd..7283e4f549af7 100644
+--- a/fs/btrfs/qgroup.h
++++ b/fs/btrfs/qgroup.h
+@@ -361,6 +361,8 @@ int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len);
+ int btrfs_qgroup_free_data(struct btrfs_inode *inode,
+ struct extent_changeset *reserved, u64 start,
+ u64 len);
++int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
++ enum btrfs_qgroup_rsv_type type, bool enforce);
+ int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
+ enum btrfs_qgroup_rsv_type type, bool enforce);
+ /* Reserve metadata space for pertrans and prealloc type */
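
The qgroup hunks rename and export the non-flushing reservation path so the delayed-inode code can reserve metadata without triggering a flush, while __btrfs_qgroup_reserve_meta() keeps its try-flush-retry wrapper. That wrapper shape is worth isolating: attempt the reservation, and only on -EDQUOT flush and try exactly once more. A sketch with stubbed reserve/flush bodies:

/* Sketch of the try/flush/retry wrapper around a quota reservation,
 * as in __btrfs_qgroup_reserve_meta(). The stubs below simulate a
 * first attempt that hits the quota limit. */
#include <errno.h>    /* EDQUOT */
#include <stdio.h>

static int reserve(int bytes)
{
    static int attempts;

    (void)bytes;
    /* Fail with -EDQUOT on the first try to exercise the retry path. */
    return attempts++ == 0 ? -EDQUOT : 0;
}

static int flush_reservations(void)
{
    return 0;   /* pretend flushing released some reserved space */
}

static int reserve_with_flush(int bytes)
{
    int ret = reserve(bytes);

    /* Success, or any failure other than quota exhaustion: done. */
    if (ret <= 0 && ret != -EDQUOT)
        return ret;

    ret = flush_reservations();
    if (ret < 0)
        return ret;
    return reserve(bytes);   /* exactly one retry after flushing */
}

int main(void)
{
    printf("ret=%d\n", reserve_with_flush(4096));
    return 0;
}
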
+diff --git a/fs/file.c b/fs/file.c
+index dab120b71e44d..f3a4bac2cbe91 100644
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -22,6 +22,8 @@
+ #include <linux/close_range.h>
+ #include <net/sock.h>
+
++#include "internal.h"
++
+ unsigned int sysctl_nr_open __read_mostly = 1024*1024;
+ unsigned int sysctl_nr_open_min = BITS_PER_LONG;
+ /* our min() is unusable in constant expressions ;-/ */
+@@ -732,36 +734,48 @@ int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
+ }
+
+ /*
+- * variant of close_fd that gets a ref on the file for later fput.
+- * The caller must ensure that filp_close() called on the file, and then
+- * an fput().
++ * See close_fd_get_file() below, this variant assumes current->files->file_lock
++ * is held.
+ */
+-int close_fd_get_file(unsigned int fd, struct file **res)
++int __close_fd_get_file(unsigned int fd, struct file **res)
+ {
+ struct files_struct *files = current->files;
+ struct file *file;
+ struct fdtable *fdt;
+
+- spin_lock(&files->file_lock);
+ fdt = files_fdtable(files);
+ if (fd >= fdt->max_fds)
+- goto out_unlock;
++ goto out_err;
+ file = fdt->fd[fd];
+ if (!file)
+- goto out_unlock;
++ goto out_err;
+ rcu_assign_pointer(fdt->fd[fd], NULL);
+ __put_unused_fd(files, fd);
+- spin_unlock(&files->file_lock);
+ get_file(file);
+ *res = file;
+ return 0;
+-
+-out_unlock:
+- spin_unlock(&files->file_lock);
++out_err:
+ *res = NULL;
+ return -ENOENT;
+ }
+
++/*
++ * variant of close_fd that gets a ref on the file for later fput.
++ * The caller must ensure that filp_close() called on the file, and then
++ * an fput().
++ */
++int close_fd_get_file(unsigned int fd, struct file **res)
++{
++ struct files_struct *files = current->files;
++ int ret;
++
++ spin_lock(&files->file_lock);
++ ret = __close_fd_get_file(fd, res);
++ spin_unlock(&files->file_lock);
++
++ return ret;
++}
++
+ void do_close_on_exec(struct files_struct *files)
+ {
+ unsigned i;
+diff --git a/fs/internal.h b/fs/internal.h
+index 77c50befbfbe9..c6c85f6ad598a 100644
+--- a/fs/internal.h
++++ b/fs/internal.h
+@@ -132,6 +132,7 @@ extern struct file *do_file_open_root(struct dentry *, struct vfsmount *,
+ const char *, const struct open_flags *);
+ extern struct open_how build_open_how(int flags, umode_t mode);
+ extern int build_open_flags(const struct open_how *how, struct open_flags *op);
++extern int __close_fd_get_file(unsigned int fd, struct file **res);
+
+ long do_sys_ftruncate(unsigned int fd, loff_t length, int small);
+ int chmod_common(const struct path *path, umode_t mode);
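
The VFS change splits close_fd_get_file() into a raw __close_fd_get_file(), which assumes current->files->file_lock is already held, and a thin locking wrapper, then exposes the raw variant through fs/internal.h so io_uring can perform its own descriptor checks under the same lock (as the io_close() rewrite further down does). The double-underscore "caller holds the lock" convention in miniature, with a pthread mutex standing in for the files spinlock:

/* Sketch of the locked/unlocked split: __take() assumes the lock is
 * held; take() is the convenience wrapper that acquires it. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int table[4] = { 10, 20, 30, 40 };

/* Caller must hold table_lock. */
static int __take(unsigned int idx, int *res)
{
    if (idx >= 4 || table[idx] == 0) {
        *res = 0;
        return -1;      /* -ENOENT in the kernel version */
    }
    *res = table[idx];
    table[idx] = 0;     /* detach the entry under the lock */
    return 0;
}

static int take(unsigned int idx, int *res)
{
    int ret;

    pthread_mutex_lock(&table_lock);
    ret = __take(idx, res);
    pthread_mutex_unlock(&table_lock);
    return ret;
}

int main(void)
{
    int v;
    printf("%d %d\n", take(2, &v), v);   /* 0 30 */
    printf("%d %d\n", take(2, &v), v);   /* -1 0: already taken */
    return 0;
}

A caller needing extra checks takes the lock itself, inspects the table, and calls __take() directly, which is precisely what io_close() does with the fdtable before committing to the close.
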
+diff --git a/fs/io-wq.c b/fs/io-wq.c
+index a564f36e260c1..63ef195b1acb1 100644
+--- a/fs/io-wq.c
++++ b/fs/io-wq.c
+@@ -555,23 +555,21 @@ get_next:
+
+ /* handle a whole dependent link */
+ do {
+- struct io_wq_work *old_work, *next_hashed, *linked;
++ struct io_wq_work *next_hashed, *linked;
+ unsigned int hash = io_get_work_hash(work);
+
+ next_hashed = wq_next_work(work);
+ io_impersonate_work(worker, work);
++ wq->do_work(work);
++ io_assign_current_work(worker, NULL);
+
+- old_work = work;
+- linked = wq->do_work(work);
+-
++ linked = wq->free_work(work);
+ work = next_hashed;
+ if (!work && linked && !io_wq_is_hashed(linked)) {
+ work = linked;
+ linked = NULL;
+ }
+ io_assign_current_work(worker, work);
+- wq->free_work(old_work);
+-
+ if (linked)
+ io_wqe_enqueue(wqe, linked);
+
+@@ -850,11 +848,9 @@ static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
+ struct io_wq *wq = wqe->wq;
+
+ do {
+- struct io_wq_work *old_work = work;
+-
+ work->flags |= IO_WQ_WORK_CANCEL;
+- work = wq->do_work(work);
+- wq->free_work(old_work);
++ wq->do_work(work);
++ work = wq->free_work(work);
+ } while (work);
+ }
+
+@@ -944,7 +940,6 @@ static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
+ */
+ spin_lock_irqsave(&worker->lock, flags);
+ if (worker->cur_work &&
+- !(worker->cur_work->flags & IO_WQ_WORK_NO_CANCEL) &&
+ match->fn(worker->cur_work, match->data)) {
+ send_sig(SIGINT, worker->task, 1);
+ match->nr_running++;
+diff --git a/fs/io-wq.h b/fs/io-wq.h
+index b158f8addcf3e..e37a0f217cc8b 100644
+--- a/fs/io-wq.h
++++ b/fs/io-wq.h
+@@ -9,7 +9,6 @@ enum {
+ IO_WQ_WORK_CANCEL = 1,
+ IO_WQ_WORK_HASHED = 2,
+ IO_WQ_WORK_UNBOUND = 4,
+- IO_WQ_WORK_NO_CANCEL = 8,
+ IO_WQ_WORK_CONCURRENT = 16,
+
+ IO_WQ_WORK_FILES = 32,
+@@ -107,8 +106,8 @@ static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
+ return container_of(work->list.next, struct io_wq_work, list);
+ }
+
+-typedef void (free_work_fn)(struct io_wq_work *);
+-typedef struct io_wq_work *(io_wq_work_fn)(struct io_wq_work *);
++typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *);
++typedef void (io_wq_work_fn)(struct io_wq_work *);
+
+ struct io_wq_data {
+ struct user_struct *user;
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 38bfd168ad3b7..241313278e5a5 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -411,7 +411,6 @@ struct io_poll_remove {
+
+ struct io_close {
+ struct file *file;
+- struct file *put_file;
+ int fd;
+ };
+
+@@ -908,8 +907,6 @@ static const struct io_op_def io_op_defs[] = {
+ IO_WQ_WORK_FS | IO_WQ_WORK_MM,
+ },
+ [IORING_OP_CLOSE] = {
+- .needs_file = 1,
+- .needs_file_no_error = 1,
+ .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_BLKCG,
+ },
+ [IORING_OP_FILES_UPDATE] = {
+@@ -996,9 +993,9 @@ enum io_mem_account {
+ ACCT_PINNED,
+ };
+
+-static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
+- struct task_struct *task);
+-
++static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
++ struct task_struct *task,
++ struct files_struct *files);
+ static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node);
+ static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
+ struct io_ring_ctx *ctx);
+@@ -2172,6 +2169,16 @@ static int io_req_task_work_add(struct io_kiocb *req)
+ return ret;
+ }
+
++static void io_req_task_work_add_fallback(struct io_kiocb *req,
++ void (*cb)(struct callback_head *))
++{
++ struct task_struct *tsk = io_wq_get_task(req->ctx->io_wq);
++
++ init_task_work(&req->task_work, cb);
++ task_work_add(tsk, &req->task_work, TWA_NONE);
++ wake_up_process(tsk);
++}
++
+ static void __io_req_task_cancel(struct io_kiocb *req, int error)
+ {
+ struct io_ring_ctx *ctx = req->ctx;
+@@ -2191,7 +2198,9 @@ static void io_req_task_cancel(struct callback_head *cb)
+ struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+ struct io_ring_ctx *ctx = req->ctx;
+
++ mutex_lock(&ctx->uring_lock);
+ __io_req_task_cancel(req, -ECANCELED);
++ mutex_unlock(&ctx->uring_lock);
+ percpu_ref_put(&ctx->refs);
+ }
+
+@@ -2229,14 +2238,8 @@ static void io_req_task_queue(struct io_kiocb *req)
+ percpu_ref_get(&req->ctx->refs);
+
+ ret = io_req_task_work_add(req);
+- if (unlikely(ret)) {
+- struct task_struct *tsk;
+-
+- init_task_work(&req->task_work, io_req_task_cancel);
+- tsk = io_wq_get_task(req->ctx->io_wq);
+- task_work_add(tsk, &req->task_work, TWA_NONE);
+- wake_up_process(tsk);
+- }
++ if (unlikely(ret))
++ io_req_task_work_add_fallback(req, io_req_task_cancel);
+ }
+
+ static inline void io_queue_next(struct io_kiocb *req)
+@@ -2354,13 +2357,8 @@ static void io_free_req_deferred(struct io_kiocb *req)
+
+ init_task_work(&req->task_work, io_put_req_deferred_cb);
+ ret = io_req_task_work_add(req);
+- if (unlikely(ret)) {
+- struct task_struct *tsk;
+-
+- tsk = io_wq_get_task(req->ctx->io_wq);
+- task_work_add(tsk, &req->task_work, TWA_NONE);
+- wake_up_process(tsk);
+- }
++ if (unlikely(ret))
++ io_req_task_work_add_fallback(req, io_put_req_deferred_cb);
+ }
+
+ static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
+@@ -2369,22 +2367,6 @@ static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
+ io_free_req_deferred(req);
+ }
+
+-static struct io_wq_work *io_steal_work(struct io_kiocb *req)
+-{
+- struct io_kiocb *nxt;
+-
+- /*
+- * A ref is owned by io-wq in which context we're. So, if that's the
+- * last one, it's safe to steal next work. False negatives are Ok,
+- * it just will be re-punted async in io_put_work()
+- */
+- if (refcount_read(&req->refs) != 1)
+- return NULL;
+-
+- nxt = io_req_find_next(req);
+- return nxt ? &nxt->work : NULL;
+-}
+-
+ static void io_double_put_req(struct io_kiocb *req)
+ {
+ /* drop both submit and complete references */
+@@ -3439,15 +3421,8 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
+ /* submit ref gets dropped, acquire a new one */
+ refcount_inc(&req->refs);
+ ret = io_req_task_work_add(req);
+- if (unlikely(ret)) {
+- struct task_struct *tsk;
+-
+- /* queue just for cancelation */
+- init_task_work(&req->task_work, io_req_task_cancel);
+- tsk = io_wq_get_task(req->ctx->io_wq);
+- task_work_add(tsk, &req->task_work, TWA_NONE);
+- wake_up_process(tsk);
+- }
++ if (unlikely(ret))
++ io_req_task_work_add_fallback(req, io_req_task_cancel);
+ return 1;
+ }
+
+@@ -4481,13 +4456,6 @@ static int io_statx(struct io_kiocb *req, bool force_nonblock)
+
+ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ {
+- /*
+- * If we queue this for async, it must not be cancellable. That would
+- * leave the 'file' in an undeterminate state, and here need to modify
+- * io_wq_work.flags, so initialize io_wq_work firstly.
+- */
+- io_req_init_async(req);
+-
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
+ if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
+@@ -4497,43 +4465,59 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ return -EBADF;
+
+ req->close.fd = READ_ONCE(sqe->fd);
+- if ((req->file && req->file->f_op == &io_uring_fops))
+- return -EBADF;
+-
+- req->close.put_file = NULL;
+ return 0;
+ }
+
+ static int io_close(struct io_kiocb *req, bool force_nonblock,
+ struct io_comp_state *cs)
+ {
++ struct files_struct *files = current->files;
+ struct io_close *close = &req->close;
++ struct fdtable *fdt;
++ struct file *file;
+ int ret;
+
+- /* might be already done during nonblock submission */
+- if (!close->put_file) {
+- ret = close_fd_get_file(close->fd, &close->put_file);
+- if (ret < 0)
+- return (ret == -ENOENT) ? -EBADF : ret;
++ file = NULL;
++ ret = -EBADF;
++ spin_lock(&files->file_lock);
++ fdt = files_fdtable(files);
++ if (close->fd >= fdt->max_fds) {
++ spin_unlock(&files->file_lock);
++ goto err;
++ }
++ file = fdt->fd[close->fd];
++ if (!file) {
++ spin_unlock(&files->file_lock);
++ goto err;
++ }
++
++ if (file->f_op == &io_uring_fops) {
++ spin_unlock(&files->file_lock);
++ file = NULL;
++ goto err;
+ }
+
+ /* if the file has a flush method, be safe and punt to async */
+- if (close->put_file->f_op->flush && force_nonblock) {
+- /* not safe to cancel at this point */
+- req->work.flags |= IO_WQ_WORK_NO_CANCEL;
+- /* was never set, but play safe */
+- req->flags &= ~REQ_F_NOWAIT;
+- /* avoid grabbing files - we don't need the files */
+- req->flags |= REQ_F_NO_FILE_TABLE;
++ if (file->f_op->flush && force_nonblock) {
++ spin_unlock(&files->file_lock);
+ return -EAGAIN;
+ }
+
++ ret = __close_fd_get_file(close->fd, &file);
++ spin_unlock(&files->file_lock);
++ if (ret < 0) {
++ if (ret == -ENOENT)
++ ret = -EBADF;
++ goto err;
++ }
++
+ /* No ->flush() or already async, safely close from here */
+- ret = filp_close(close->put_file, req->work.identity->files);
++ ret = filp_close(file, current->files);
++err:
+ if (ret < 0)
+ req_set_fail_links(req);
+- fput(close->put_file);
+- close->put_file = NULL;
++ if (file)
++ fput(file);
+ __io_req_complete(req, ret, 0, cs);
+ return 0;
+ }
+@@ -5159,12 +5143,8 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
+ */
+ ret = io_req_task_work_add(req);
+ if (unlikely(ret)) {
+- struct task_struct *tsk;
+-
+ WRITE_ONCE(poll->canceled, true);
+- tsk = io_wq_get_task(req->ctx->io_wq);
+- task_work_add(tsk, &req->task_work, TWA_NONE);
+- wake_up_process(tsk);
++ io_req_task_work_add_fallback(req, func);
+ }
+ return 1;
+ }
+@@ -6384,7 +6364,7 @@ static int io_issue_sqe(struct io_kiocb *req, bool force_nonblock,
+ return 0;
+ }
+
+-static struct io_wq_work *io_wq_submit_work(struct io_wq_work *work)
++static void io_wq_submit_work(struct io_wq_work *work)
+ {
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+ struct io_kiocb *timeout;
+@@ -6394,10 +6374,12 @@ static struct io_wq_work *io_wq_submit_work(struct io_wq_work *work)
+ if (timeout)
+ io_queue_linked_timeout(timeout);
+
+- /* if NO_CANCEL is set, we must still run the work */
+- if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) ==
+- IO_WQ_WORK_CANCEL) {
+- ret = -ECANCELED;
++ if (work->flags & IO_WQ_WORK_CANCEL) {
++ /* io-wq is going to take down one */
++ refcount_inc(&req->refs);
++ percpu_ref_get(&req->ctx->refs);
++ io_req_task_work_add_fallback(req, io_req_task_cancel);
++ return;
+ }
+
+ if (!ret) {
+@@ -6438,8 +6420,6 @@ static struct io_wq_work *io_wq_submit_work(struct io_wq_work *work)
+ if (lock_ctx)
+ mutex_unlock(&lock_ctx->uring_lock);
+ }
+-
+- return io_steal_work(req);
+ }
+
+ static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
+@@ -6506,9 +6486,10 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
+ if (prev) {
+ req_set_fail_links(prev);
+ io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
+- io_put_req(prev);
++ io_put_req_deferred(prev, 1);
+ } else {
+- io_req_complete(req, -ETIME);
++ io_cqring_add_event(req, -ETIME, 0);
++ io_put_req_deferred(req, 1);
+ }
+ return HRTIMER_NORESTART;
+ }
+@@ -8070,12 +8051,12 @@ static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
+ return __io_sqe_files_update(ctx, &up, nr_args);
+ }
+
+-static void io_free_work(struct io_wq_work *work)
++static struct io_wq_work *io_free_work(struct io_wq_work *work)
+ {
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+
+- /* Consider that io_steal_work() relies on this ref */
+- io_put_req(req);
++ req = io_put_req_find_next(req);
++ return req ? &req->work : NULL;
+ }
+
+ static int io_init_wq_offload(struct io_ring_ctx *ctx,
+@@ -8779,7 +8760,7 @@ static void io_ring_exit_work(struct work_struct *work)
+ * as nobody else will be looking for them.
+ */
+ do {
+- __io_uring_cancel_task_requests(ctx, NULL);
++ io_uring_try_cancel_requests(ctx, NULL, NULL);
+ } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
+ io_ring_ctx_free(ctx);
+ }
+@@ -8893,6 +8874,40 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx,
+ }
+ }
+
++static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
++ struct task_struct *task,
++ struct files_struct *files)
++{
++ struct io_task_cancel cancel = { .task = task, .files = files, };
++
++ while (1) {
++ enum io_wq_cancel cret;
++ bool ret = false;
++
++ if (ctx->io_wq) {
++ cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb,
++ &cancel, true);
++ ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
++ }
++
++ /* SQPOLL thread does its own polling */
++ if (!(ctx->flags & IORING_SETUP_SQPOLL) && !files) {
++ while (!list_empty_careful(&ctx->iopoll_list)) {
++ io_iopoll_try_reap_events(ctx);
++ ret = true;
++ }
++ }
++
++ ret |= io_poll_remove_all(ctx, task, files);
++ ret |= io_kill_timeouts(ctx, task, files);
++ ret |= io_run_task_work();
++ io_cqring_overflow_flush(ctx, true, task, files);
++ if (!ret)
++ break;
++ cond_resched();
++ }
++}
++
+ static int io_uring_count_inflight(struct io_ring_ctx *ctx,
+ struct task_struct *task,
+ struct files_struct *files)
+@@ -8912,7 +8927,6 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
+ struct files_struct *files)
+ {
+ while (!list_empty_careful(&ctx->inflight_list)) {
+- struct io_task_cancel cancel = { .task = task, .files = files };
+ DEFINE_WAIT(wait);
+ int inflight;
+
+@@ -8920,49 +8934,17 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
+ if (!inflight)
+ break;
+
+- io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);
+- io_poll_remove_all(ctx, task, files);
+- io_kill_timeouts(ctx, task, files);
+- io_cqring_overflow_flush(ctx, true, task, files);
+- /* cancellations _may_ trigger task work */
+- io_run_task_work();
++ io_uring_try_cancel_requests(ctx, task, files);
+
++ if (ctx->sq_data)
++ io_sq_thread_unpark(ctx->sq_data);
+ prepare_to_wait(&task->io_uring->wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (inflight == io_uring_count_inflight(ctx, task, files))
+ schedule();
+ finish_wait(&task->io_uring->wait, &wait);
+- }
+-}
+-
+-static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
+- struct task_struct *task)
+-{
+- while (1) {
+- struct io_task_cancel cancel = { .task = task, .files = NULL, };
+- enum io_wq_cancel cret;
+- bool ret = false;
+-
+- if (ctx->io_wq) {
+- cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb,
+- &cancel, true);
+- ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
+- }
+-
+- /* SQPOLL thread does its own polling */
+- if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
+- while (!list_empty_careful(&ctx->iopoll_list)) {
+- io_iopoll_try_reap_events(ctx);
+- ret = true;
+- }
+- }
+-
+- ret |= io_poll_remove_all(ctx, task, NULL);
+- ret |= io_kill_timeouts(ctx, task, NULL);
+- ret |= io_run_task_work();
+- if (!ret)
+- break;
+- cond_resched();
++ if (ctx->sq_data)
++ io_sq_thread_park(ctx->sq_data);
+ }
+ }
+
+@@ -8995,11 +8977,10 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
+ }
+
+ io_cancel_defer_files(ctx, task, files);
+- io_cqring_overflow_flush(ctx, true, task, files);
+
+ io_uring_cancel_files(ctx, task, files);
+ if (!files)
+- __io_uring_cancel_task_requests(ctx, task);
++ io_uring_try_cancel_requests(ctx, task, NULL);
+
+ if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
+ atomic_dec(&task->io_uring->in_idle);
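
Among other things, the io_uring/io-wq hunks swap the do_work/free_work callback signatures, retire IO_WQ_WORK_NO_CANCEL in favor of punting cancellations to task work, and factor four open-coded copies of the task-work fallback into io_req_task_work_add_fallback(). A loose sketch of that last idea — routing one callback through a fallback path when the preferred queue refuses it (all names here are illustrative, not io_uring API):

/* Sketch: consolidate a repeated fallback path into one helper, as
 * io_req_task_work_add_fallback() does above. */
#include <stdio.h>

struct req {
    void (*cb)(struct req *);
};

/* Preferred path: add to the issuing task's work list. */
static int primary_add(struct req *r)
{
    (void)r;
    return -1;   /* pretend the task is exiting and refuses new work */
}

/* Fallback path: hand the same callback to an io-wq style worker. */
static void fallback_add(struct req *r, void (*cb)(struct req *))
{
    r->cb = cb;
    r->cb(r);    /* a real fallback wakes a worker thread instead */
}

static void cancel_cb(struct req *r)
{
    (void)r;
    printf("request canceled via fallback\n");
}

static void queue_cancel(struct req *r)
{
    if (primary_add(r) != 0)
        fallback_add(r, cancel_cb);   /* one helper replaces N copies */
}

int main(void)
{
    struct req r = { 0 };
    queue_cancel(&r);
    return 0;
}
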
+diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h
+index eec7928ff8fe0..99580c22f91a4 100644
+--- a/include/linux/eeprom_93xx46.h
++++ b/include/linux/eeprom_93xx46.h
+@@ -16,6 +16,8 @@ struct eeprom_93xx46_platform_data {
+ #define EEPROM_93XX46_QUIRK_SINGLE_WORD_READ (1 << 0)
+ /* Instructions such as EWEN are (addrlen + 2) in length. */
+ #define EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH (1 << 1)
++/* Add extra cycle after address during a read */
++#define EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE BIT(2)
+
+ /*
+ * optional hooks to control additional logic
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index daca06dde99ba..1d7677376e742 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -48,37 +48,14 @@ static int sof_sdw_quirk_cb(const struct dmi_system_id *id)
+ }
+
+ static const struct dmi_system_id sof_sdw_quirk_table[] = {
++ /* CometLake devices */
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+- DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0A32")
+- },
+- .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+- SOF_RT711_JD_SRC_JD2 |
+- SOF_RT715_DAI_ID_FIX |
+- SOF_SDW_FOUR_SPK),
+- },
+- {
+- .callback = sof_sdw_quirk_cb,
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+- DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0A3E")
+- },
+- .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+- SOF_RT711_JD_SRC_JD2 |
+- SOF_RT715_DAI_ID_FIX),
+- },
+- {
+- .callback = sof_sdw_quirk_cb,
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+- DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0A5E")
++ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "CometLake Client"),
+ },
+- .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+- SOF_RT711_JD_SRC_JD2 |
+- SOF_RT715_DAI_ID_FIX |
+- SOF_SDW_FOUR_SPK),
++ .driver_data = (void *)SOF_SDW_PCH_DMIC,
+ },
+ {
+ .callback = sof_sdw_quirk_cb,
+@@ -109,7 +86,7 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ SOF_RT715_DAI_ID_FIX |
+ SOF_SDW_FOUR_SPK),
+ },
+- {
++ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+@@ -119,6 +96,16 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ SOF_RT715_DAI_ID_FIX |
+ SOF_SDW_FOUR_SPK),
+ },
++ /* IceLake devices */
++ {
++ .callback = sof_sdw_quirk_cb,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Ice Lake Client"),
++ },
++ .driver_data = (void *)SOF_SDW_PCH_DMIC,
++ },
++ /* TigerLake devices */
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+@@ -134,18 +121,23 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "Ice Lake Client"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0A3E")
+ },
+- .driver_data = (void *)SOF_SDW_PCH_DMIC,
++ .driver_data = (void *)(SOF_SDW_TGL_HDMI |
++ SOF_RT711_JD_SRC_JD2 |
++ SOF_RT715_DAI_ID_FIX),
+ },
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "CometLake Client"),
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0A5E")
+ },
+- .driver_data = (void *)SOF_SDW_PCH_DMIC,
++ .driver_data = (void *)(SOF_SDW_TGL_HDMI |
++ SOF_RT711_JD_SRC_JD2 |
++ SOF_RT715_DAI_ID_FIX |
++ SOF_SDW_FOUR_SPK),
+ },
+ {
+ .callback = sof_sdw_quirk_cb,
+@@ -167,7 +159,34 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ SOF_SDW_PCH_DMIC |
+ SOF_SDW_FOUR_SPK),
+ },
+-
++ {
++ /*
++ * this entry covers multiple HP SKUs. The family name
++ * does not seem robust enough, so we use a partial
++ * match that ignores the product name suffix
++ * (e.g. 15-eb1xxx, 14t-ea000 or 13-aw2xxx)
++ */
++ .callback = sof_sdw_quirk_cb,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x360 Convertible"),
++ },
++ .driver_data = (void *)(SOF_SDW_TGL_HDMI |
++ SOF_SDW_PCH_DMIC |
++ SOF_RT711_JD_SRC_JD2),
++ },
++ /* TigerLake-SDCA devices */
++ {
++ .callback = sof_sdw_quirk_cb,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0A32")
++ },
++ .driver_data = (void *)(SOF_SDW_TGL_HDMI |
++ SOF_RT711_JD_SRC_JD2 |
++ SOF_RT715_DAI_ID_FIX |
++ SOF_SDW_FOUR_SPK),
++ },
+ {}
+ };
+
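/*
 * Illustrative sketch of how the reordered table above is consumed at
 * probe time; this mirrors the surrounding driver and is not part of the
 * patch. dmi_check_system() walks the entries in order and fires the
 * callback on a match. DMI_MATCH() performs a substring match, which is
 * why the HP Spectre x360 entry can ignore the product-name suffix,
 * while DMI_EXACT_MATCH() requires the full string.
 */
static unsigned long sof_sdw_quirk;

static int sof_sdw_quirk_cb(const struct dmi_system_id *id)
{
	sof_sdw_quirk = (unsigned long)id->driver_data; /* e.g. SOF_SDW_PCH_DMIC */
	return 1;
}

/* at probe: dmi_check_system(sof_sdw_quirk_table); */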
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index df036a359f2fc..448de77f43fd8 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -2603,141 +2603,251 @@ static int snd_bbfpro_controls_create(struct usb_mixer_interface *mixer)
+ }
+
+ /*
+- * Pioneer DJ DJM-250MK2 and maybe other DJM models
++ * Pioneer DJ DJM Mixers
+ *
+- * For playback, no duplicate mapping should be set.
+- * There are three mixer stereo channels (CH1, CH2, AUX)
+- * and three stereo sources (Playback 1-2, Playback 3-4, Playback 5-6).
+- * Each channel should be mapped just once to one source.
+- * If mapped multiple times, only one source will play on given channel
+- * (sources are not mixed together).
++ * These devices generally have options for soft-switching the playback and
++ * capture sources in addition to the recording level. Although different
++ * devices have different configurations, there seem to be canonical values
++ * for specific capture/playback types; see the definitions of these below.
+ *
+- * For recording, duplicate mapping is OK. We will get the same signal multiple times.
+- *
+- * Channels 7-8 are in both directions fixed to FX SEND / FX RETURN.
+- *
+- * See also notes in the quirks-table.h file.
++ * The wValue combines the stereo channel number (high byte) with the
++ * source value (low byte); e.g. setting Ch2 to capture phono gives 0x0203.
++ * Capture, playback and capture level each use a different wIndex.
+ */
+
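/*
 * Minimal illustration of the wValue layout described above; the macro
 * name is hypothetical and not part of the patch. The stereo channel
 * number sits in the high byte and the source selector in the low byte.
 */
#define SND_DJM_WVALUE(ch, src)	((u16)(((ch) << 8) | (src)))

/* SND_DJM_WVALUE(2, 0x03) == 0x0203: set Ch2 to capture phono */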
+-struct snd_pioneer_djm_option {
+- const u16 wIndex;
+- const u16 wValue;
++// Capture types
++#define SND_DJM_CAP_LINE 0x00
++#define SND_DJM_CAP_CDLINE 0x01
++#define SND_DJM_CAP_DIGITAL 0x02
++#define SND_DJM_CAP_PHONO 0x03
++#define SND_DJM_CAP_PFADER 0x06
++#define SND_DJM_CAP_XFADERA 0x07
++#define SND_DJM_CAP_XFADERB 0x08
++#define SND_DJM_CAP_MIC 0x09
++#define SND_DJM_CAP_AUX 0x0d
++#define SND_DJM_CAP_RECOUT 0x0a
++#define SND_DJM_CAP_NONE 0x0f
++#define SND_DJM_CAP_CH1PFADER 0x11
++#define SND_DJM_CAP_CH2PFADER 0x12
++#define SND_DJM_CAP_CH3PFADER 0x13
++#define SND_DJM_CAP_CH4PFADER 0x14
++
++// Playback types
++#define SND_DJM_PB_CH1 0x00
++#define SND_DJM_PB_CH2 0x01
++#define SND_DJM_PB_AUX 0x04
++
++#define SND_DJM_WINDEX_CAP 0x8002
++#define SND_DJM_WINDEX_CAPLVL 0x8003
++#define SND_DJM_WINDEX_PB 0x8016
++
++// kcontrol->private_value layout
++#define SND_DJM_VALUE_MASK 0x0000ffff
++#define SND_DJM_GROUP_MASK 0x00ff0000
++#define SND_DJM_DEVICE_MASK 0xff000000
++#define SND_DJM_GROUP_SHIFT 16
++#define SND_DJM_DEVICE_SHIFT 24
++
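/*
 * Sketch of the private_value packing implied by the masks above; the
 * helper name is hypothetical, but the layout is the one used by the
 * get/put handlers below: device table index in bits 31..24, control
 * (group) index in bits 23..16, selected option index in bits 15..0.
 */
static inline unsigned long snd_djm_pack(u8 device, u8 group, u16 option)
{
	return ((unsigned long)device << SND_DJM_DEVICE_SHIFT) |
	       ((unsigned long)group << SND_DJM_GROUP_SHIFT) |
	       option;
}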
++// device table index
++#define SND_DJM_250MK2_IDX 0x0
++#define SND_DJM_750_IDX 0x1
++#define SND_DJM_900NXS2_IDX 0x2
++
++
++#define SND_DJM_CTL(_name, suffix, _default_value, _windex) { \
++ .name = _name, \
++ .options = snd_djm_opts_##suffix, \
++ .noptions = ARRAY_SIZE(snd_djm_opts_##suffix), \
++ .default_value = _default_value, \
++ .wIndex = _windex }
++
++#define SND_DJM_DEVICE(suffix) { \
++ .controls = snd_djm_ctls_##suffix, \
++ .ncontrols = ARRAY_SIZE(snd_djm_ctls_##suffix) }
++
++
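/*
 * For reference, one expansion of SND_DJM_CTL() above (illustrative only):
 * SND_DJM_CTL("Ch1 Input", 250mk2_cap1, 2, SND_DJM_WINDEX_CAP) pastes the
 * suffix onto snd_djm_opts_ and yields roughly
 *
 *	{ .name = "Ch1 Input",
 *	  .options = snd_djm_opts_250mk2_cap1,
 *	  .noptions = ARRAY_SIZE(snd_djm_opts_250mk2_cap1),
 *	  .default_value = 2,
 *	  .wIndex = SND_DJM_WINDEX_CAP }
 */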
++struct snd_djm_device {
+ const char *name;
++ const struct snd_djm_ctl *controls;
++ size_t ncontrols;
+ };
+
+-static const struct snd_pioneer_djm_option snd_pioneer_djm_options_capture_level[] = {
+- { .name = "-5 dB", .wValue = 0x0300, .wIndex = 0x8003 },
+- { .name = "-10 dB", .wValue = 0x0200, .wIndex = 0x8003 },
+- { .name = "-15 dB", .wValue = 0x0100, .wIndex = 0x8003 },
+- { .name = "-19 dB", .wValue = 0x0000, .wIndex = 0x8003 }
++struct snd_djm_ctl {
++ const char *name;
++ const u16 *options;
++ size_t noptions;
++ u16 default_value;
++ u16 wIndex;
+ };
+
+-static const struct snd_pioneer_djm_option snd_pioneer_djm_options_capture_ch12[] = {
+- { .name = "CH1 Control Tone PHONO", .wValue = 0x0103, .wIndex = 0x8002 },
+- { .name = "CH1 Control Tone LINE", .wValue = 0x0100, .wIndex = 0x8002 },
+- { .name = "Post CH1 Fader", .wValue = 0x0106, .wIndex = 0x8002 },
+- { .name = "Cross Fader A", .wValue = 0x0107, .wIndex = 0x8002 },
+- { .name = "Cross Fader B", .wValue = 0x0108, .wIndex = 0x8002 },
+- { .name = "MIC", .wValue = 0x0109, .wIndex = 0x8002 },
+- { .name = "AUX", .wValue = 0x010d, .wIndex = 0x8002 },
+- { .name = "REC OUT", .wValue = 0x010a, .wIndex = 0x8002 }
++static const char *snd_djm_get_label_caplevel(u16 wvalue)
++{
++ switch (wvalue) {
++ case 0x0000: return "-19dB";
++ case 0x0100: return "-15dB";
++ case 0x0200: return "-10dB";
++ case 0x0300: return "-5dB";
++ default: return NULL;
++ }
+ };
+
+-static const struct snd_pioneer_djm_option snd_pioneer_djm_options_capture_ch34[] = {
+- { .name = "CH2 Control Tone PHONO", .wValue = 0x0203, .wIndex = 0x8002 },
+- { .name = "CH2 Control Tone LINE", .wValue = 0x0200, .wIndex = 0x8002 },
+- { .name = "Post CH2 Fader", .wValue = 0x0206, .wIndex = 0x8002 },
+- { .name = "Cross Fader A", .wValue = 0x0207, .wIndex = 0x8002 },
+- { .name = "Cross Fader B", .wValue = 0x0208, .wIndex = 0x8002 },
+- { .name = "MIC", .wValue = 0x0209, .wIndex = 0x8002 },
+- { .name = "AUX", .wValue = 0x020d, .wIndex = 0x8002 },
+- { .name = "REC OUT", .wValue = 0x020a, .wIndex = 0x8002 }
++static const char *snd_djm_get_label_cap(u16 wvalue)
++{
++ switch (wvalue & 0x00ff) {
++ case SND_DJM_CAP_LINE: return "Control Tone LINE";
++ case SND_DJM_CAP_CDLINE: return "Control Tone CD/LINE";
++ case SND_DJM_CAP_DIGITAL: return "Control Tone DIGITAL";
++ case SND_DJM_CAP_PHONO: return "Control Tone PHONO";
++ case SND_DJM_CAP_PFADER: return "Post Fader";
++ case SND_DJM_CAP_XFADERA: return "Cross Fader A";
++ case SND_DJM_CAP_XFADERB: return "Cross Fader B";
++ case SND_DJM_CAP_MIC: return "Mic";
++ case SND_DJM_CAP_RECOUT: return "Rec Out";
++ case SND_DJM_CAP_AUX: return "Aux";
++ case SND_DJM_CAP_NONE: return "None";
++ case SND_DJM_CAP_CH1PFADER: return "Post Fader Ch1";
++ case SND_DJM_CAP_CH2PFADER: return "Post Fader Ch2";
++ case SND_DJM_CAP_CH3PFADER: return "Post Fader Ch3";
++ case SND_DJM_CAP_CH4PFADER: return "Post Fader Ch4";
++ default: return NULL;
++ }
+ };
+
+-static const struct snd_pioneer_djm_option snd_pioneer_djm_options_capture_ch56[] = {
+- { .name = "REC OUT", .wValue = 0x030a, .wIndex = 0x8002 },
+- { .name = "Post CH1 Fader", .wValue = 0x0311, .wIndex = 0x8002 },
+- { .name = "Post CH2 Fader", .wValue = 0x0312, .wIndex = 0x8002 },
+- { .name = "Cross Fader A", .wValue = 0x0307, .wIndex = 0x8002 },
+- { .name = "Cross Fader B", .wValue = 0x0308, .wIndex = 0x8002 },
+- { .name = "MIC", .wValue = 0x0309, .wIndex = 0x8002 },
+- { .name = "AUX", .wValue = 0x030d, .wIndex = 0x8002 }
++static const char *snd_djm_get_label_pb(u16 wvalue)
++{
++ switch (wvalue & 0x00ff) {
++ case SND_DJM_PB_CH1: return "Ch1";
++ case SND_DJM_PB_CH2: return "Ch2";
++ case SND_DJM_PB_AUX: return "Aux";
++ default: return NULL;
++ }
+ };
+
+-static const struct snd_pioneer_djm_option snd_pioneer_djm_options_playback_12[] = {
+- { .name = "CH1", .wValue = 0x0100, .wIndex = 0x8016 },
+- { .name = "CH2", .wValue = 0x0101, .wIndex = 0x8016 },
+- { .name = "AUX", .wValue = 0x0104, .wIndex = 0x8016 }
++static const char *snd_djm_get_label(u16 wvalue, u16 windex)
++{
++ switch (windex) {
++ case SND_DJM_WINDEX_CAPLVL: return snd_djm_get_label_caplevel(wvalue);
++ case SND_DJM_WINDEX_CAP: return snd_djm_get_label_cap(wvalue);
++ case SND_DJM_WINDEX_PB: return snd_djm_get_label_pb(wvalue);
++ default: return NULL;
++ }
+ };
+
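/*
 * Example derived from the helpers above: snd_djm_get_label(0x0203,
 * SND_DJM_WINDEX_CAP) masks off the channel byte (0x0203 & 0x00ff ==
 * SND_DJM_CAP_PHONO) and returns "Control Tone PHONO". Only the capture
 * level variant keys on the full wvalue.
 */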
+-static const struct snd_pioneer_djm_option snd_pioneer_djm_options_playback_34[] = {
+- { .name = "CH1", .wValue = 0x0200, .wIndex = 0x8016 },
+- { .name = "CH2", .wValue = 0x0201, .wIndex = 0x8016 },
+- { .name = "AUX", .wValue = 0x0204, .wIndex = 0x8016 }
++
++// DJM-250MK2
++static const u16 snd_djm_opts_cap_level[] = {
++ 0x0000, 0x0100, 0x0200, 0x0300 };
++
++static const u16 snd_djm_opts_250mk2_cap1[] = {
++ 0x0103, 0x0100, 0x0106, 0x0107, 0x0108, 0x0109, 0x010d, 0x010a };
++
++static const u16 snd_djm_opts_250mk2_cap2[] = {
++ 0x0203, 0x0200, 0x0206, 0x0207, 0x0208, 0x0209, 0x020d, 0x020a };
++
++static const u16 snd_djm_opts_250mk2_cap3[] = {
++ 0x030a, 0x0311, 0x0312, 0x0307, 0x0308, 0x0309, 0x030d };
++
++static const u16 snd_djm_opts_250mk2_pb1[] = { 0x0100, 0x0101, 0x0104 };
++static const u16 snd_djm_opts_250mk2_pb2[] = { 0x0200, 0x0201, 0x0204 };
++static const u16 snd_djm_opts_250mk2_pb3[] = { 0x0300, 0x0301, 0x0304 };
++
++static const struct snd_djm_ctl snd_djm_ctls_250mk2[] = {
++ SND_DJM_CTL("Capture Level", cap_level, 0, SND_DJM_WINDEX_CAPLVL),
++ SND_DJM_CTL("Ch1 Input", 250mk2_cap1, 2, SND_DJM_WINDEX_CAP),
++ SND_DJM_CTL("Ch2 Input", 250mk2_cap2, 2, SND_DJM_WINDEX_CAP),
++ SND_DJM_CTL("Ch3 Input", 250mk2_cap3, 0, SND_DJM_WINDEX_CAP),
++ SND_DJM_CTL("Ch1 Output", 250mk2_pb1, 0, SND_DJM_WINDEX_PB),
++ SND_DJM_CTL("Ch2 Output", 250mk2_pb2, 1, SND_DJM_WINDEX_PB),
++ SND_DJM_CTL("Ch3 Output", 250mk2_pb3, 2, SND_DJM_WINDEX_PB)
+ };
+
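/*
 * Worked example for the DJM-250MK2 table above, derived from the arrays
 * rather than new behaviour: "Ch1 Input" has default_value 2, so the
 * startup option is snd_djm_opts_250mk2_cap1[2] == 0x0106, i.e. Ch1
 * routed to Post Fader, written with wIndex SND_DJM_WINDEX_CAP (0x8002).
 */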
+-static const struct snd_pioneer_djm_option snd_pioneer_djm_options_playback_56[] = {
+- { .name = "CH1", .wValue = 0x0300, .wIndex = 0x8016 },
+- { .name = "CH2", .wValue = 0x0301, .wIndex = 0x8016 },
+- { .name = "AUX", .wValue = 0x0304, .wIndex = 0x8016 }
++
++// DJM-750
++static const u16 snd_djm_opts_750_cap1[] = {
++ 0x0101, 0x0103, 0x0106, 0x0107, 0x0108, 0x0109, 0x010a, 0x010f };
++static const u16 snd_djm_opts_750_cap2[] = {
++ 0x0200, 0x0201, 0x0206, 0x0207, 0x0208, 0x0209, 0x020a, 0x020f };
++static const u16 snd_djm_opts_750_cap3[] = {
++ 0x0300, 0x0301, 0x0306, 0x0307, 0x0308, 0x0309, 0x030a, 0x030f };
++static const u16 snd_djm_opts_750_cap4[] = {
++ 0x0401, 0x0403, 0x0406, 0x0407, 0x0408, 0x0409, 0x040a, 0x040f };
++
++static const struct snd_djm_ctl snd_djm_ctls_750[] = {
++ SND_DJM_CTL("Capture Level", cap_level, 0, SND_DJM_WINDEX_CAPLVL),
++ SND_DJM_CTL("Ch1 Input", 750_cap1, 2, SND_DJM_WINDEX_CAP),
++ SND_DJM_CTL("Ch2 Input", 750_cap2, 2, SND_DJM_WINDEX_CAP),
++ SND_DJM_CTL("Ch3 Input", 750_cap3, 0, SND_DJM_WINDEX_CAP),
++ SND_DJM_CTL("Ch4 Input", 750_cap4, 0, SND_DJM_WINDEX_CAP)
+ };
+
+-struct snd_pioneer_djm_option_group {
+- const char *name;
+- const struct snd_pioneer_djm_option *options;
+- const size_t count;
+- const u16 default_value;
++
++// DJM-900NXS2
++static const u16 snd_djm_opts_900nxs2_cap1[] = {
++ 0x0100, 0x0102, 0x0103, 0x0106, 0x0107, 0x0108, 0x0109, 0x010a };
++static const u16 snd_djm_opts_900nxs2_cap2[] = {
++ 0x0200, 0x0202, 0x0203, 0x0206, 0x0207, 0x0208, 0x0209, 0x020a };
++static const u16 snd_djm_opts_900nxs2_cap3[] = {
++ 0x0300, 0x0302, 0x0303, 0x0306, 0x0307, 0x0308, 0x0309, 0x030a };
++static const u16 snd_djm_opts_900nxs2_cap4[] = {
++ 0x0400, 0x0402, 0x0403, 0x0406, 0x0407, 0x0408, 0x0409, 0x040a };
++static const u16 snd_djm_opts_900nxs2_cap5[] = {
++ 0x0507, 0x0508, 0x0509, 0x050a, 0x0511, 0x0512, 0x0513, 0x0514 };
++
++static const struct snd_djm_ctl snd_djm_ctls_900nxs2[] = {
++ SND_DJM_CTL("Capture Level", cap_level, 0, SND_DJM_WINDEX_CAPLVL),
++ SND_DJM_CTL("Ch1 Input", 900nxs2_cap1, 2, SND_DJM_WINDEX_CAP),
++ SND_DJM_CTL("Ch2 Input", 900nxs2_cap2, 2, SND_DJM_WINDEX_CAP),
++ SND_DJM_CTL("Ch3 Input", 900nxs2_cap3, 2, SND_DJM_WINDEX_CAP),
++ SND_DJM_CTL("Ch4 Input", 900nxs2_cap4, 2, SND_DJM_WINDEX_CAP),
++ SND_DJM_CTL("Ch5 Input", 900nxs2_cap5, 3, SND_DJM_WINDEX_CAP)
+ };
+
+-#define snd_pioneer_djm_option_group_item(_name, suffix, _default_value) { \
+- .name = _name, \
+- .options = snd_pioneer_djm_options_##suffix, \
+- .count = ARRAY_SIZE(snd_pioneer_djm_options_##suffix), \
+- .default_value = _default_value }
+-
+-static const struct snd_pioneer_djm_option_group snd_pioneer_djm_option_groups[] = {
+- snd_pioneer_djm_option_group_item("Master Capture Level Capture Switch", capture_level, 0),
+- snd_pioneer_djm_option_group_item("Capture 1-2 Capture Switch", capture_ch12, 2),
+- snd_pioneer_djm_option_group_item("Capture 3-4 Capture Switch", capture_ch34, 2),
+- snd_pioneer_djm_option_group_item("Capture 5-6 Capture Switch", capture_ch56, 0),
+- snd_pioneer_djm_option_group_item("Playback 1-2 Playback Switch", playback_12, 0),
+- snd_pioneer_djm_option_group_item("Playback 3-4 Playback Switch", playback_34, 1),
+- snd_pioneer_djm_option_group_item("Playback 5-6 Playback Switch", playback_56, 2)
++
++static const struct snd_djm_device snd_djm_devices[] = {
++ SND_DJM_DEVICE(250mk2),
++ SND_DJM_DEVICE(750),
++ SND_DJM_DEVICE(900nxs2)
+ };
+
+-// layout of the kcontrol->private_value:
+-#define SND_PIONEER_DJM_VALUE_MASK 0x0000ffff
+-#define SND_PIONEER_DJM_GROUP_MASK 0xffff0000
+-#define SND_PIONEER_DJM_GROUP_SHIFT 16
+
+-static int snd_pioneer_djm_controls_info(struct snd_kcontrol *kctl, struct snd_ctl_elem_info *info)
++static int snd_djm_controls_info(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_info *info)
+ {
+- u16 group_index = kctl->private_value >> SND_PIONEER_DJM_GROUP_SHIFT;
+- size_t count;
++ unsigned long private_value = kctl->private_value;
++ u8 device_idx = (private_value & SND_DJM_DEVICE_MASK) >> SND_DJM_DEVICE_SHIFT;
++ u8 ctl_idx = (private_value & SND_DJM_GROUP_MASK) >> SND_DJM_GROUP_SHIFT;
++ const struct snd_djm_device *device = &snd_djm_devices[device_idx];
+ const char *name;
+- const struct snd_pioneer_djm_option_group *group;
++ const struct snd_djm_ctl *ctl;
++ size_t noptions;
+
+- if (group_index >= ARRAY_SIZE(snd_pioneer_djm_option_groups))
++ if (ctl_idx >= device->ncontrols)
++ return -EINVAL;
++
++ ctl = &device->controls[ctl_idx];
++ noptions = ctl->noptions;
++ if (info->value.enumerated.item >= noptions)
++ info->value.enumerated.item = noptions - 1;
++
++ name = snd_djm_get_label(ctl->options[info->value.enumerated.item],
++ ctl->wIndex);
++ if (!name)
+ return -EINVAL;
+
+- group = &snd_pioneer_djm_option_groups[group_index];
+- count = group->count;
+- if (info->value.enumerated.item >= count)
+- info->value.enumerated.item = count - 1;
+- name = group->options[info->value.enumerated.item].name;
+ strlcpy(info->value.enumerated.name, name, sizeof(info->value.enumerated.name));
+ info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ info->count = 1;
+- info->value.enumerated.items = count;
++ info->value.enumerated.items = noptions;
+ return 0;
+ }
+
+-static int snd_pioneer_djm_controls_update(struct usb_mixer_interface *mixer, u16 group, u16 value)
++static int snd_djm_controls_update(struct usb_mixer_interface *mixer,
++ u8 device_idx, u8 group, u16 value)
+ {
+ int err;
++ const struct snd_djm_device *device = &snd_djm_devices[device_idx];
+
+- if (group >= ARRAY_SIZE(snd_pioneer_djm_option_groups)
+- || value >= snd_pioneer_djm_option_groups[group].count)
++ if ((group >= device->ncontrols) || value >= device->controls[group].noptions)
+ return -EINVAL;
+
+ err = snd_usb_lock_shutdown(mixer->chip);
+@@ -2748,63 +2858,76 @@ static int snd_pioneer_djm_controls_update(struct usb_mixer_interface *mixer, u1
+ mixer->chip->dev, usb_sndctrlpipe(mixer->chip->dev, 0),
+ USB_REQ_SET_FEATURE,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+- snd_pioneer_djm_option_groups[group].options[value].wValue,
+- snd_pioneer_djm_option_groups[group].options[value].wIndex,
++ device->controls[group].options[value],
++ device->controls[group].wIndex,
+ NULL, 0);
+
+ snd_usb_unlock_shutdown(mixer->chip);
+ return err;
+ }
+
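/*
 * Illustrative call, assuming a registered DJM-250MK2 mixer: setting
 * control 1 ("Ch1 Input") to option 2 issues the vendor SET_FEATURE
 * request above with wValue 0x0106 and wIndex 0x8002.
 *
 *	err = snd_djm_controls_update(mixer, SND_DJM_250MK2_IDX, 1, 2);
 */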
+-static int snd_pioneer_djm_controls_get(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *elem)
++static int snd_djm_controls_get(struct snd_kcontrol *kctl,
++ struct snd_ctl_elem_value *elem)
+ {
+- elem->value.enumerated.item[0] = kctl->private_value & SND_PIONEER_DJM_VALUE_MASK;
++ elem->value.enumerated.item[0] = kctl->private_value & SND_DJM_VALUE_MASK;
+ return 0;
+ }
+
+-static int snd_pioneer_djm_controls_put(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *elem)
++static int snd_djm_controls_put(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *elem)
+ {
+ struct usb_mixer_elem_list *list = snd_kcontrol_chip(kctl);
+ struct usb_mixer_interface *mixer = list->mixer;
+ unsigned long private_value = kctl->private_value;
+- u16 group = (private_value & SND_PIONEER_DJM_GROUP_MASK) >> SND_PIONEER_DJM_GROUP_SHIFT;
++
++ u8 device = (private_value & SND_DJM_DEVICE_MASK) >> SND_DJM_DEVICE_SHIFT;
++ u8 group = (private_value & SND_DJM_GROUP_MASK) >> SND_DJM_GROUP_SHIFT;
+ u16 value = elem->value.enumerated.item[0];
+
+- kctl->private_value = (group << SND_PIONEER_DJM_GROUP_SHIFT) | value;
++ kctl->private_value = ((device << SND_DJM_DEVICE_SHIFT) |
++ (group << SND_DJM_GROUP_SHIFT) |
++ value);
+
+- return snd_pioneer_djm_controls_update(mixer, group, value);
++ return snd_djm_controls_update(mixer, device, group, value);
+ }
+
+-static int snd_pioneer_djm_controls_resume(struct usb_mixer_elem_list *list)
++static int snd_djm_controls_resume(struct usb_mixer_elem_list *list)
+ {
+ unsigned long private_value = list->kctl->private_value;
+- u16 group = (private_value & SND_PIONEER_DJM_GROUP_MASK) >> SND_PIONEER_DJM_GROUP_SHIFT;
+- u16 value = (private_value & SND_PIONEER_DJM_VALUE_MASK);
++ u8 device = (private_value & SND_DJM_DEVICE_MASK) >> SND_DJM_DEVICE_SHIFT;
++ u8 group = (private_value & SND_DJM_GROUP_MASK) >> SND_DJM_GROUP_SHIFT;
++ u16 value = (private_value & SND_DJM_VALUE_MASK);
+
+- return snd_pioneer_djm_controls_update(list->mixer, group, value);
++ return snd_djm_controls_update(list->mixer, device, group, value);
+ }
+
+-static int snd_pioneer_djm_controls_create(struct usb_mixer_interface *mixer)
++static int snd_djm_controls_create(struct usb_mixer_interface *mixer,
++ const u8 device_idx)
+ {
+ int err, i;
+- const struct snd_pioneer_djm_option_group *group;
++ u16 value;
++
++ const struct snd_djm_device *device = &snd_djm_devices[device_idx];
++
+ struct snd_kcontrol_new knew = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .index = 0,
+- .info = snd_pioneer_djm_controls_info,
+- .get = snd_pioneer_djm_controls_get,
+- .put = snd_pioneer_djm_controls_put
++ .info = snd_djm_controls_info,
++ .get = snd_djm_controls_get,
++ .put = snd_djm_controls_put
+ };
+
+- for (i = 0; i < ARRAY_SIZE(snd_pioneer_djm_option_groups); i++) {
+- group = &snd_pioneer_djm_option_groups[i];
+- knew.name = group->name;
+- knew.private_value = (i << SND_PIONEER_DJM_GROUP_SHIFT) | group->default_value;
+- err = snd_pioneer_djm_controls_update(mixer, i, group->default_value);
++ for (i = 0; i < device->ncontrols; i++) {
++ value = device->controls[i].default_value;
++ knew.name = device->controls[i].name;
++ knew.private_value = (
++ (device_idx << SND_DJM_DEVICE_SHIFT) |
++ (i << SND_DJM_GROUP_SHIFT) |
++ value);
++ err = snd_djm_controls_update(mixer, device_idx, i, value);
+ if (err)
+ return err;
+- err = add_single_ctl_with_resume(mixer, 0, snd_pioneer_djm_controls_resume,
++ err = add_single_ctl_with_resume(mixer, 0, snd_djm_controls_resume,
+ &knew, NULL);
+ if (err)
+ return err;
+@@ -2917,7 +3040,13 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
+ err = snd_bbfpro_controls_create(mixer);
+ break;
+ case USB_ID(0x2b73, 0x0017): /* Pioneer DJ DJM-250MK2 */
+- err = snd_pioneer_djm_controls_create(mixer);
++ err = snd_djm_controls_create(mixer, SND_DJM_250MK2_IDX);
++ break;
++ case USB_ID(0x08e4, 0x017f): /* Pioneer DJ DJM-750 */
++ err = snd_djm_controls_create(mixer, SND_DJM_750_IDX);
++ break;
++ case USB_ID(0x2b73, 0x000a): /* Pioneer DJ DJM-900NXS2 */
++ err = snd_djm_controls_create(mixer, SND_DJM_900NXS2_IDX);
+ break;
+ }
+
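/*
 * Note on the IDs above (an assumption based on sound/usb conventions):
 * USB_ID() packs vendor and product IDs, so USB_ID(0x08e4, 0x017f)
 * selects the Pioneer DJ DJM-750 entry added by this patch.
 */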