author     Mike Pagano <mpagano@gentoo.org>  2018-11-27 11:17:33 -0500
committer  Mike Pagano <mpagano@gentoo.org>  2018-11-27 11:17:33 -0500
commit     0a7b3b377ca2adb363bd764b113e1f4a3ee75165 (patch)
tree       c8718a384bab7d13419a6007edb88ca4bbb344df
parent     proj/linux-patches: Linux patch 4.14.83 (diff)
proj/linux-patches: Linux patch 4.14.84
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README               |    4
-rw-r--r--  1083_linux-4.14.84.patch  | 2271
2 files changed, 2275 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 37903b0c..b8625f05 100644
--- a/0000_README
+++ b/0000_README
@@ -375,6 +375,10 @@ Patch: 1082-4.14.83.patch
From: http://www.kernel.org
Desc: Linux 4.14.83
+Patch: 1083-4.14.84.patch
+From: http://www.kernel.org
+Desc: Linux 4.14.84
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1083_linux-4.14.84.patch b/1083_linux-4.14.84.patch
new file mode 100644
index 00000000..bc3fcf56
--- /dev/null
+++ b/1083_linux-4.14.84.patch
@@ -0,0 +1,2271 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 9841bad6f271..99a08722124d 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -1011,7 +1011,7 @@
+ earlyprintk=serial[,0x...[,baudrate]]
+ earlyprintk=ttySn[,baudrate]
+ earlyprintk=dbgp[debugController#]
+- earlyprintk=pciserial,bus:device.function[,baudrate]
++ earlyprintk=pciserial[,force],bus:device.function[,baudrate]
+ earlyprintk=xdbc[xhciController#]
+
+ earlyprintk is useful when the kernel crashes before
+@@ -1043,6 +1043,10 @@
+
+ The sclp output can only be used on s390.
+
++ The optional "force" to "pciserial" enables use of a
++ PCI device even when its classcode is not of the
++ UART class.
++
+ edac_report= [HW,EDAC] Control how to report EDAC event
+ Format: {"on" | "off" | "force"}
+ on: enable EDAC to report H/W event. May be overridden
+diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
+index ea91cb61a602..43f066cde67d 100644
+--- a/Documentation/x86/x86_64/mm.txt
++++ b/Documentation/x86/x86_64/mm.txt
+@@ -4,8 +4,9 @@ Virtual memory map with 4 level page tables:
+ 0000000000000000 - 00007fffffffffff (=47 bits) user space, different per mm
+ hole caused by [47:63] sign extension
+ ffff800000000000 - ffff87ffffffffff (=43 bits) guard hole, reserved for hypervisor
+-ffff880000000000 - ffffc7ffffffffff (=64 TB) direct mapping of all phys. memory
+-ffffc80000000000 - ffffc8ffffffffff (=40 bits) hole
++ffff880000000000 - ffff887fffffffff (=39 bits) LDT remap for PTI
++ffff888000000000 - ffffc87fffffffff (=64 TB) direct mapping of all phys. memory
++ffffc88000000000 - ffffc8ffffffffff (=39 bits) hole
+ ffffc90000000000 - ffffe8ffffffffff (=45 bits) vmalloc/ioremap space
+ ffffe90000000000 - ffffe9ffffffffff (=40 bits) hole
+ ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB)
+@@ -30,8 +31,9 @@ Virtual memory map with 5 level page tables:
+ 0000000000000000 - 00ffffffffffffff (=56 bits) user space, different per mm
+ hole caused by [56:63] sign extension
+ ff00000000000000 - ff0fffffffffffff (=52 bits) guard hole, reserved for hypervisor
+-ff10000000000000 - ff8fffffffffffff (=55 bits) direct mapping of all phys. memory
+-ff90000000000000 - ff9fffffffffffff (=52 bits) LDT remap for PTI
++ff10000000000000 - ff10ffffffffffff (=48 bits) LDT remap for PTI
++ff11000000000000 - ff90ffffffffffff (=55 bits) direct mapping of all phys. memory
++ff91000000000000 - ff9fffffffffffff (=3840 TB) hole
+ ffa0000000000000 - ffd1ffffffffffff (=54 bits) vmalloc/ioremap space (12800 TB)
+ ffd2000000000000 - ffd3ffffffffffff (=49 bits) hole
+ ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB)
+diff --git a/Makefile b/Makefile
+index 0f42814095a4..874d72a3e6a7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 83
++SUBLEVEL = 84
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
+index 43393208229e..d79eaa816f29 100644
+--- a/arch/arm64/include/asm/percpu.h
++++ b/arch/arm64/include/asm/percpu.h
+@@ -93,6 +93,7 @@ static inline unsigned long __percpu_##op(void *ptr, \
+ : [val] "Ir" (val)); \
+ break; \
+ default: \
++ ret = 0; \
+ BUILD_BUG(); \
+ } \
+ \
+@@ -122,6 +123,7 @@ static inline unsigned long __percpu_read(void *ptr, int size)
+ ret = READ_ONCE(*(u64 *)ptr);
+ break;
+ default:
++ ret = 0;
+ BUILD_BUG();
+ }
+
+@@ -191,6 +193,7 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
+ : [val] "r" (val));
+ break;
+ default:
++ ret = 0;
+ BUILD_BUG();
+ }
+
+diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
+index 22a5921562c7..0417c929d21a 100644
+--- a/arch/arm64/kernel/probes/kprobes.c
++++ b/arch/arm64/kernel/probes/kprobes.c
+@@ -23,7 +23,9 @@
+ #include <linux/slab.h>
+ #include <linux/stop_machine.h>
+ #include <linux/sched/debug.h>
++#include <linux/set_memory.h>
+ #include <linux/stringify.h>
++#include <linux/vmalloc.h>
+ #include <asm/traps.h>
+ #include <asm/ptrace.h>
+ #include <asm/cacheflush.h>
+@@ -42,10 +44,21 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+ static void __kprobes
+ post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
+
++static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
++{
++ void *addrs[1];
++ u32 insns[1];
++
++ addrs[0] = addr;
++ insns[0] = opcode;
++
++ return aarch64_insn_patch_text(addrs, insns, 1);
++}
++
+ static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
+ {
+ /* prepare insn slot */
+- p->ainsn.api.insn[0] = cpu_to_le32(p->opcode);
++ patch_text(p->ainsn.api.insn, p->opcode);
+
+ flush_icache_range((uintptr_t) (p->ainsn.api.insn),
+ (uintptr_t) (p->ainsn.api.insn) +
+@@ -118,15 +131,15 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
+ return 0;
+ }
+
+-static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
++void *alloc_insn_page(void)
+ {
+- void *addrs[1];
+- u32 insns[1];
++ void *page;
+
+- addrs[0] = (void *)addr;
+- insns[0] = (u32)opcode;
++ page = vmalloc_exec(PAGE_SIZE);
++ if (page)
++ set_memory_ro((unsigned long)page, 1);
+
+- return aarch64_insn_patch_text(addrs, insns, 1);
++ return page;
+ }
+
+ /* arm kprobe: install breakpoint in text */
+diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig
+index 490b12af103c..c52d0efacd14 100644
+--- a/arch/mips/configs/cavium_octeon_defconfig
++++ b/arch/mips/configs/cavium_octeon_defconfig
+@@ -140,6 +140,7 @@ CONFIG_RTC_CLASS=y
+ CONFIG_RTC_DRV_DS1307=y
+ CONFIG_STAGING=y
+ CONFIG_OCTEON_ETHERNET=y
++CONFIG_OCTEON_USB=y
+ # CONFIG_IOMMU_SUPPORT is not set
+ CONFIG_RAS=y
+ CONFIG_EXT4_FS=y
+diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
+index 746d03423333..61e91fee8467 100644
+--- a/arch/s390/kernel/perf_cpum_cf.c
++++ b/arch/s390/kernel/perf_cpum_cf.c
+@@ -376,7 +376,7 @@ static int __hw_perf_event_init(struct perf_event *event)
+ return -ENOENT;
+
+ if (ev > PERF_CPUM_CF_MAX_CTR)
+- return -EINVAL;
++ return -ENOENT;
+
+ /* Obtain the counter set to which the specified counter belongs */
+ set = get_counter_set(ev);
+diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
+index 308564b9bf68..101cadabfc89 100644
+--- a/arch/s390/kernel/vdso32/Makefile
++++ b/arch/s390/kernel/vdso32/Makefile
+@@ -33,7 +33,7 @@ UBSAN_SANITIZE := n
+ $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
+
+ # link rule for the .so file, .lds has to be first
+-$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32)
++$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) FORCE
+ $(call if_changed,vdso32ld)
+
+ # strip rule for the .so file
+@@ -42,12 +42,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
+ $(call if_changed,objcopy)
+
+ # assembly rules for the .S files
+-$(obj-vdso32): %.o: %.S
++$(obj-vdso32): %.o: %.S FORCE
+ $(call if_changed_dep,vdso32as)
+
+ # actual build commands
+ quiet_cmd_vdso32ld = VDSO32L $@
+- cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
++ cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@
+ quiet_cmd_vdso32as = VDSO32A $@
+ cmd_vdso32as = $(CC) $(a_flags) -c -o $@ $<
+
+diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
+index f81ae7998883..36bbafcf4a77 100644
+--- a/arch/s390/kernel/vdso64/Makefile
++++ b/arch/s390/kernel/vdso64/Makefile
+@@ -33,7 +33,7 @@ UBSAN_SANITIZE := n
+ $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
+
+ # link rule for the .so file, .lds has to be first
+-$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64)
++$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) FORCE
+ $(call if_changed,vdso64ld)
+
+ # strip rule for the .so file
+@@ -42,12 +42,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
+ $(call if_changed,objcopy)
+
+ # assembly rules for the .S files
+-$(obj-vdso64): %.o: %.S
++$(obj-vdso64): %.o: %.S FORCE
+ $(call if_changed_dep,vdso64as)
+
+ # actual build commands
+ quiet_cmd_vdso64ld = VDSO64L $@
+- cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
++ cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@
+ quiet_cmd_vdso64as = VDSO64A $@
+ cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
+
+diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c
+index 5bd374491f94..6c151b42e65d 100644
+--- a/arch/s390/numa/numa.c
++++ b/arch/s390/numa/numa.c
+@@ -54,6 +54,7 @@ int __node_distance(int a, int b)
+ {
+ return mode->distance ? mode->distance(a, b) : 0;
+ }
++EXPORT_SYMBOL(__node_distance);
+
+ int numa_debug_enabled;
+
+diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
+index c94c3bd70ccd..df4a985716eb 100644
+--- a/arch/um/os-Linux/skas/process.c
++++ b/arch/um/os-Linux/skas/process.c
+@@ -610,6 +610,11 @@ int start_idle_thread(void *stack, jmp_buf *switch_buf)
+ fatal_sigsegv();
+ }
+ longjmp(*switch_buf, 1);
++
++ /* unreachable */
++ printk(UM_KERN_ERR "impossible long jump!");
++ fatal_sigsegv();
++ return 0;
+ }
+
+ void initial_thread_cb_skas(void (*proc)(void *), void *arg)
+diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
+index e1407312c412..74d531f6d518 100644
+--- a/arch/x86/include/asm/page_64_types.h
++++ b/arch/x86/include/asm/page_64_types.h
+@@ -33,14 +33,16 @@
+
+ /*
+ * Set __PAGE_OFFSET to the most negative possible address +
+- * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a
+- * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
+- * what Xen requires.
++ * PGDIR_SIZE*17 (pgd slot 273).
++ *
++ * The gap is to allow a space for LDT remap for PTI (1 pgd slot) and space for
++ * a hypervisor (16 slots). Choosing 16 slots for a hypervisor is arbitrary,
++ * but it's what Xen requires.
+ */
+ #ifdef CONFIG_X86_5LEVEL
+-#define __PAGE_OFFSET_BASE _AC(0xff10000000000000, UL)
++#define __PAGE_OFFSET_BASE _AC(0xff11000000000000, UL)
+ #else
+-#define __PAGE_OFFSET_BASE _AC(0xffff880000000000, UL)
++#define __PAGE_OFFSET_BASE _AC(0xffff888000000000, UL)
+ #endif
+
+ #ifdef CONFIG_RANDOMIZE_MEMORY
+diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
+index 6b8f73dcbc2c..7764617b8f9c 100644
+--- a/arch/x86/include/asm/pgtable_64_types.h
++++ b/arch/x86/include/asm/pgtable_64_types.h
+@@ -88,16 +88,15 @@ typedef struct { pteval_t pte; } pte_t;
+ # define VMALLOC_SIZE_TB _AC(12800, UL)
+ # define __VMALLOC_BASE _AC(0xffa0000000000000, UL)
+ # define __VMEMMAP_BASE _AC(0xffd4000000000000, UL)
+-# define LDT_PGD_ENTRY _AC(-112, UL)
+-# define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT)
+ #else
+ # define VMALLOC_SIZE_TB _AC(32, UL)
+ # define __VMALLOC_BASE _AC(0xffffc90000000000, UL)
+ # define __VMEMMAP_BASE _AC(0xffffea0000000000, UL)
+-# define LDT_PGD_ENTRY _AC(-3, UL)
+-# define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT)
+ #endif
+
++#define LDT_PGD_ENTRY -240UL
++#define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT)
++
+ #ifdef CONFIG_RANDOMIZE_MEMORY
+ # define VMALLOC_START vmalloc_base
+ # define VMEMMAP_START vmemmap_base
+diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
+index 5e801c8c8ce7..374a52fa5296 100644
+--- a/arch/x86/kernel/early_printk.c
++++ b/arch/x86/kernel/early_printk.c
+@@ -213,8 +213,9 @@ static unsigned int mem32_serial_in(unsigned long addr, int offset)
+ * early_pci_serial_init()
+ *
+ * This function is invoked when the early_printk param starts with "pciserial"
+- * The rest of the param should be ",B:D.F,baud" where B, D & F describe the
+- * location of a PCI device that must be a UART device.
++ * The rest of the param should be "[force],B:D.F,baud", where B, D & F describe
++ * the location of a PCI device that must be a UART device. "force" is optional
++ * and overrides the use of an UART device with a wrong PCI class code.
+ */
+ static __init void early_pci_serial_init(char *s)
+ {
+@@ -224,17 +225,23 @@ static __init void early_pci_serial_init(char *s)
+ u32 classcode, bar0;
+ u16 cmdreg;
+ char *e;
++ int force = 0;
+
+-
+- /*
+- * First, part the param to get the BDF values
+- */
+ if (*s == ',')
+ ++s;
+
+ if (*s == 0)
+ return;
+
++ /* Force the use of an UART device with wrong class code */
++ if (!strncmp(s, "force,", 6)) {
++ force = 1;
++ s += 6;
++ }
++
++ /*
++ * Part the param to get the BDF values
++ */
+ bus = (u8)simple_strtoul(s, &e, 16);
+ s = e;
+ if (*s != ':')
+@@ -253,7 +260,7 @@ static __init void early_pci_serial_init(char *s)
+ s++;
+
+ /*
+- * Second, find the device from the BDF
++ * Find the device from the BDF
+ */
+ cmdreg = read_pci_config(bus, slot, func, PCI_COMMAND);
+ classcode = read_pci_config(bus, slot, func, PCI_CLASS_REVISION);
+@@ -264,8 +271,10 @@ static __init void early_pci_serial_init(char *s)
+ */
+ if (((classcode >> 16 != PCI_CLASS_COMMUNICATION_MODEM) &&
+ (classcode >> 16 != PCI_CLASS_COMMUNICATION_SERIAL)) ||
+- (((classcode >> 8) & 0xff) != 0x02)) /* 16550 I/F at BAR0 */
+- return;
++ (((classcode >> 8) & 0xff) != 0x02)) /* 16550 I/F at BAR0 */ {
++ if (!force)
++ return;
++ }
+
+ /*
+ * Determine if it is IO or memory mapped
+@@ -289,7 +298,7 @@ static __init void early_pci_serial_init(char *s)
+ }
+
+ /*
+- * Lastly, initialize the hardware
++ * Initialize the hardware
+ */
+ if (*s) {
+ if (strcmp(s, "nocfg") == 0)
+diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
+index 26d713ecad34..65df298d4e9e 100644
+--- a/arch/x86/kernel/ldt.c
++++ b/arch/x86/kernel/ldt.c
+@@ -103,14 +103,6 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
+ /*
+ * If PTI is enabled, this maps the LDT into the kernelmode and
+ * usermode tables for the given mm.
+- *
+- * There is no corresponding unmap function. Even if the LDT is freed, we
+- * leave the PTEs around until the slot is reused or the mm is destroyed.
+- * This is harmless: the LDT is always in ordinary memory, and no one will
+- * access the freed slot.
+- *
+- * If we wanted to unmap freed LDTs, we'd also need to do a flush to make
+- * it useful, and the flush would slow down modify_ldt().
+ */
+ static int
+ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
+@@ -119,8 +111,8 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
+ bool is_vmalloc, had_top_level_entry;
+ unsigned long va;
+ spinlock_t *ptl;
++ int i, nr_pages;
+ pgd_t *pgd;
+- int i;
+
+ if (!static_cpu_has(X86_FEATURE_PTI))
+ return 0;
+@@ -141,7 +133,9 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
+
+ is_vmalloc = is_vmalloc_addr(ldt->entries);
+
+- for (i = 0; i * PAGE_SIZE < ldt->nr_entries * LDT_ENTRY_SIZE; i++) {
++ nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
++
++ for (i = 0; i < nr_pages; i++) {
+ unsigned long offset = i << PAGE_SHIFT;
+ const void *src = (char *)ldt->entries + offset;
+ unsigned long pfn;
+@@ -189,14 +183,42 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
+ }
+ }
+
+- va = (unsigned long)ldt_slot_va(slot);
+- flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, 0);
+-
+ ldt->slot = slot;
+ #endif
+ return 0;
+ }
+
++static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
++{
++#ifdef CONFIG_PAGE_TABLE_ISOLATION
++ unsigned long va;
++ int i, nr_pages;
++
++ if (!ldt)
++ return;
++
++ /* LDT map/unmap is only required for PTI */
++ if (!static_cpu_has(X86_FEATURE_PTI))
++ return;
++
++ nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
++
++ for (i = 0; i < nr_pages; i++) {
++ unsigned long offset = i << PAGE_SHIFT;
++ spinlock_t *ptl;
++ pte_t *ptep;
++
++ va = (unsigned long)ldt_slot_va(ldt->slot) + offset;
++ ptep = get_locked_pte(mm, va, &ptl);
++ pte_clear(mm, va, ptep);
++ pte_unmap_unlock(ptep, ptl);
++ }
++
++ va = (unsigned long)ldt_slot_va(ldt->slot);
++ flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, 0);
++#endif /* CONFIG_PAGE_TABLE_ISOLATION */
++}
++
+ static void free_ldt_pgtables(struct mm_struct *mm)
+ {
+ #ifdef CONFIG_PAGE_TABLE_ISOLATION
+@@ -433,6 +455,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
+ }
+
+ install_ldt(mm, new_ldt);
++ unmap_ldt_struct(mm, old_ldt);
+ free_ldt_struct(old_ldt);
+ error = 0;
+
+diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
+index 8ed11a5b1a9d..b33fa127a613 100644
+--- a/arch/x86/xen/mmu_pv.c
++++ b/arch/x86/xen/mmu_pv.c
+@@ -1869,7 +1869,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
+ init_top_pgt[0] = __pgd(0);
+
+ /* Pre-constructed entries are in pfn, so convert to mfn */
+- /* L4[272] -> level3_ident_pgt */
++ /* L4[273] -> level3_ident_pgt */
+ /* L4[511] -> level3_kernel_pgt */
+ convert_pfn_mfn(init_top_pgt);
+
+@@ -1889,8 +1889,8 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
+ addr[0] = (unsigned long)pgd;
+ addr[1] = (unsigned long)l3;
+ addr[2] = (unsigned long)l2;
+- /* Graft it onto L4[272][0]. Note that we creating an aliasing problem:
+- * Both L4[272][0] and L4[511][510] have entries that point to the same
++ /* Graft it onto L4[273][0]. Note that we creating an aliasing problem:
++ * Both L4[273][0] and L4[511][510] have entries that point to the same
+ * L2 (PMD) tables. Meaning that if you modify it in __va space
+ * it will be also modified in the __ka space! (But if you just
+ * modify the PMD table to point to other PTE's or none, then you
+diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
+index 88cd949003f3..ecd84d910ed2 100644
+--- a/drivers/acpi/acpi_platform.c
++++ b/drivers/acpi/acpi_platform.c
+@@ -30,6 +30,7 @@ static const struct acpi_device_id forbidden_id_list[] = {
+ {"PNP0200", 0}, /* AT DMA Controller */
+ {"ACPI0009", 0}, /* IOxAPIC */
+ {"ACPI000A", 0}, /* IOAPIC */
++ {"SMB0001", 0}, /* ACPI SMBUS virtual device */
+ {"", 0},
+ };
+
+diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
+index 4bde16fb97d8..95600309ce42 100644
+--- a/drivers/acpi/acpi_watchdog.c
++++ b/drivers/acpi/acpi_watchdog.c
+@@ -12,35 +12,51 @@
+ #define pr_fmt(fmt) "ACPI: watchdog: " fmt
+
+ #include <linux/acpi.h>
+-#include <linux/dmi.h>
+ #include <linux/ioport.h>
+ #include <linux/platform_device.h>
+
+ #include "internal.h"
+
+-static const struct dmi_system_id acpi_watchdog_skip[] = {
+- {
+- /*
+- * On Lenovo Z50-70 there are two issues with the WDAT
+- * table. First some of the instructions use RTC SRAM
+- * to store persistent information. This does not work well
+- * with Linux RTC driver. Second, more important thing is
+- * that the instructions do not actually reset the system.
+- *
+- * On this particular system iTCO_wdt seems to work just
+- * fine so we prefer that over WDAT for now.
+- *
+- * See also https://bugzilla.kernel.org/show_bug.cgi?id=199033.
+- */
+- .ident = "Lenovo Z50-70",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "20354"),
+- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Z50-70"),
+- },
+- },
+- {}
+-};
++#ifdef CONFIG_RTC_MC146818_LIB
++#include <linux/mc146818rtc.h>
++
++/*
++ * There are several systems where the WDAT table is accessing RTC SRAM to
++ * store persistent information. This does not work well with the Linux RTC
++ * driver so on those systems we skip WDAT driver and prefer iTCO_wdt
++ * instead.
++ *
++ * See also https://bugzilla.kernel.org/show_bug.cgi?id=199033.
++ */
++static bool acpi_watchdog_uses_rtc(const struct acpi_table_wdat *wdat)
++{
++ const struct acpi_wdat_entry *entries;
++ int i;
++
++ entries = (struct acpi_wdat_entry *)(wdat + 1);
++ for (i = 0; i < wdat->entries; i++) {
++ const struct acpi_generic_address *gas;
++
++ gas = &entries[i].register_region;
++ if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
++ switch (gas->address) {
++ case RTC_PORT(0):
++ case RTC_PORT(1):
++ case RTC_PORT(2):
++ case RTC_PORT(3):
++ return true;
++ }
++ }
++ }
++
++ return false;
++}
++#else
++static bool acpi_watchdog_uses_rtc(const struct acpi_table_wdat *wdat)
++{
++ return false;
++}
++#endif
+
+ static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void)
+ {
+@@ -50,9 +66,6 @@ static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void)
+ if (acpi_disabled)
+ return NULL;
+
+- if (dmi_check_system(acpi_watchdog_skip))
+- return NULL;
+-
+ status = acpi_get_table(ACPI_SIG_WDAT, 0,
+ (struct acpi_table_header **)&wdat);
+ if (ACPI_FAILURE(status)) {
+@@ -60,6 +73,11 @@ static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void)
+ return NULL;
+ }
+
++ if (acpi_watchdog_uses_rtc(wdat)) {
++ pr_info("Skipping WDAT on this system because it uses RTC SRAM\n");
++ return NULL;
++ }
++
+ return wdat;
+ }
+
+diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
+index 1e2648e4c286..27b202c64c84 100644
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -1491,6 +1491,11 @@ static const struct attribute_group zram_disk_attr_group = {
+ .attrs = zram_disk_attrs,
+ };
+
++static const struct attribute_group *zram_disk_attr_groups[] = {
++ &zram_disk_attr_group,
++ NULL,
++};
++
+ /*
+ * Allocate and initialize new zram device. the function returns
+ * '>= 0' device_id upon success, and negative value otherwise.
+@@ -1568,23 +1573,14 @@ static int zram_add(void)
+ if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
+ blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
+
++ disk_to_dev(zram->disk)->groups = zram_disk_attr_groups;
+ add_disk(zram->disk);
+
+- ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
+- &zram_disk_attr_group);
+- if (ret < 0) {
+- pr_err("Error creating sysfs group for device %d\n",
+- device_id);
+- goto out_free_disk;
+- }
+ strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
+
+ pr_info("Added device: %s\n", zram->disk->disk_name);
+ return device_id;
+
+-out_free_disk:
+- del_gendisk(zram->disk);
+- put_disk(zram->disk);
+ out_free_queue:
+ blk_cleanup_queue(queue);
+ out_free_idr:
+@@ -1612,16 +1608,6 @@ static int zram_remove(struct zram *zram)
+ zram->claim = true;
+ mutex_unlock(&bdev->bd_mutex);
+
+- /*
+- * Remove sysfs first, so no one will perform a disksize
+- * store while we destroy the devices. This also helps during
+- * hot_remove -- zram_reset_device() is the last holder of
+- * ->init_lock, no later/concurrent disksize_store() or any
+- * other sysfs handlers are possible.
+- */
+- sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
+- &zram_disk_attr_group);
+-
+ /* Make sure all the pending I/O are finished */
+ fsync_bdev(bdev);
+ zram_reset_device(zram);
+diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
+index 20724abd38bd..7df6b5b1e7ee 100644
+--- a/drivers/clk/clk-fixed-factor.c
++++ b/drivers/clk/clk-fixed-factor.c
+@@ -210,6 +210,7 @@ static int of_fixed_factor_clk_remove(struct platform_device *pdev)
+ {
+ struct clk *clk = platform_get_drvdata(pdev);
+
++ of_clk_del_provider(pdev->dev.of_node);
+ clk_unregister_fixed_factor(clk);
+
+ return 0;
+diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
+index b5c46b3f8764..6d6475c32ee5 100644
+--- a/drivers/clk/clk-fixed-rate.c
++++ b/drivers/clk/clk-fixed-rate.c
+@@ -200,6 +200,7 @@ static int of_fixed_clk_remove(struct platform_device *pdev)
+ {
+ struct clk *clk = platform_get_drvdata(pdev);
+
++ of_clk_del_provider(pdev->dev.of_node);
+ clk_unregister_fixed_rate(clk);
+
+ return 0;
+diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
+index 25601967d1cd..500a55415e90 100644
+--- a/drivers/clk/samsung/clk-exynos5420.c
++++ b/drivers/clk/samsung/clk-exynos5420.c
+@@ -280,6 +280,7 @@ static const struct samsung_clk_reg_dump exynos5420_set_clksrc[] = {
+ { .offset = GATE_BUS_TOP, .value = 0xffffffff, },
+ { .offset = GATE_BUS_DISP1, .value = 0xffffffff, },
+ { .offset = GATE_IP_PERIC, .value = 0xffffffff, },
++ { .offset = GATE_IP_PERIS, .value = 0xffffffff, },
+ };
+
+ static int exynos5420_clk_suspend(void)
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index d1191ebed072..ed01e3aae0e8 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -120,6 +120,9 @@ static const struct edid_quirk {
+ /* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */
+ { "SDC", 0x3652, EDID_QUIRK_FORCE_6BPC },
+
++ /* BOE model 0x0771 reports 8 bpc, but is a 6 bpc panel */
++ { "BOE", 0x0771, EDID_QUIRK_FORCE_6BPC },
++
+ /* Belinea 10 15 55 */
+ { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
+ { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
+diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+index f905c214fdd0..5a5b3535411f 100644
+--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
++++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+@@ -160,13 +160,6 @@ static u32 decon_get_frame_count(struct decon_context *ctx, bool end)
+ return frm;
+ }
+
+-static u32 decon_get_vblank_counter(struct exynos_drm_crtc *crtc)
+-{
+- struct decon_context *ctx = crtc->ctx;
+-
+- return decon_get_frame_count(ctx, false);
+-}
+-
+ static void decon_setup_trigger(struct decon_context *ctx)
+ {
+ if (!ctx->crtc->i80_mode && !(ctx->out_type & I80_HW_TRG))
+@@ -532,7 +525,6 @@ static const struct exynos_drm_crtc_ops decon_crtc_ops = {
+ .disable = decon_disable,
+ .enable_vblank = decon_enable_vblank,
+ .disable_vblank = decon_disable_vblank,
+- .get_vblank_counter = decon_get_vblank_counter,
+ .atomic_begin = decon_atomic_begin,
+ .update_plane = decon_update_plane,
+ .disable_plane = decon_disable_plane,
+@@ -550,7 +542,6 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
+ int ret;
+
+ ctx->drm_dev = drm_dev;
+- drm_dev->max_vblank_count = 0xffffffff;
+
+ for (win = ctx->first_win; win < WINDOWS_NR; win++) {
+ int tmp = (win == ctx->first_win) ? 0 : win;
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+index 6ce0821590df..4787560bf93e 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+@@ -147,16 +147,6 @@ static void exynos_drm_crtc_disable_vblank(struct drm_crtc *crtc)
+ exynos_crtc->ops->disable_vblank(exynos_crtc);
+ }
+
+-static u32 exynos_drm_crtc_get_vblank_counter(struct drm_crtc *crtc)
+-{
+- struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+-
+- if (exynos_crtc->ops->get_vblank_counter)
+- return exynos_crtc->ops->get_vblank_counter(exynos_crtc);
+-
+- return 0;
+-}
+-
+ static const struct drm_crtc_funcs exynos_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+@@ -166,7 +156,6 @@ static const struct drm_crtc_funcs exynos_crtc_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = exynos_drm_crtc_enable_vblank,
+ .disable_vblank = exynos_drm_crtc_disable_vblank,
+- .get_vblank_counter = exynos_drm_crtc_get_vblank_counter,
+ };
+
+ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
+index f8bae4cb4823..d228b5148dbc 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
++++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
+@@ -133,7 +133,6 @@ struct exynos_drm_crtc_ops {
+ void (*disable)(struct exynos_drm_crtc *crtc);
+ int (*enable_vblank)(struct exynos_drm_crtc *crtc);
+ void (*disable_vblank)(struct exynos_drm_crtc *crtc);
+- u32 (*get_vblank_counter)(struct exynos_drm_crtc *crtc);
+ enum drm_mode_status (*mode_valid)(struct exynos_drm_crtc *crtc,
+ const struct drm_display_mode *mode);
+ int (*atomic_check)(struct exynos_drm_crtc *crtc,
+diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
+index 6f819f144cb4..6f67d73b184e 100644
+--- a/drivers/hid/uhid.c
++++ b/drivers/hid/uhid.c
+@@ -12,6 +12,7 @@
+
+ #include <linux/atomic.h>
+ #include <linux/compat.h>
++#include <linux/cred.h>
+ #include <linux/device.h>
+ #include <linux/fs.h>
+ #include <linux/hid.h>
+@@ -722,6 +723,17 @@ static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
+
+ switch (uhid->input_buf.type) {
+ case UHID_CREATE:
++ /*
++ * 'struct uhid_create_req' contains a __user pointer which is
++ * copied from, so it's unsafe to allow this with elevated
++ * privileges (e.g. from a setuid binary) or via kernel_write().
++ */
++ if (file->f_cred != current_cred() || uaccess_kernel()) {
++ pr_err_once("UHID_CREATE from different security context by process %d (%s), this is not allowed.\n",
++ task_tgid_vnr(current), current->comm);
++ ret = -EACCES;
++ goto unlock;
++ }
+ ret = uhid_dev_create(uhid, &uhid->input_buf);
+ break;
+ case UHID_CREATE2:
+diff --git a/drivers/hwmon/ibmpowernv.c b/drivers/hwmon/ibmpowernv.c
+index 5ccdd0b52650..b38f4951c94e 100644
+--- a/drivers/hwmon/ibmpowernv.c
++++ b/drivers/hwmon/ibmpowernv.c
+@@ -126,7 +126,7 @@ static ssize_t show_label(struct device *dev, struct device_attribute *devattr,
+ return sprintf(buf, "%s\n", sdata->label);
+ }
+
+-static int __init get_logical_cpu(int hwcpu)
++static int get_logical_cpu(int hwcpu)
+ {
+ int cpu;
+
+@@ -137,9 +137,8 @@ static int __init get_logical_cpu(int hwcpu)
+ return -ENOENT;
+ }
+
+-static void __init make_sensor_label(struct device_node *np,
+- struct sensor_data *sdata,
+- const char *label)
++static void make_sensor_label(struct device_node *np,
++ struct sensor_data *sdata, const char *label)
+ {
+ u32 id;
+ size_t n;
+diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
+index 568dd4affb33..011907eff660 100644
+--- a/drivers/media/v4l2-core/v4l2-event.c
++++ b/drivers/media/v4l2-core/v4l2-event.c
+@@ -193,6 +193,22 @@ int v4l2_event_pending(struct v4l2_fh *fh)
+ }
+ EXPORT_SYMBOL_GPL(v4l2_event_pending);
+
++static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev)
++{
++ struct v4l2_fh *fh = sev->fh;
++ unsigned int i;
++
++ lockdep_assert_held(&fh->subscribe_lock);
++ assert_spin_locked(&fh->vdev->fh_lock);
++
++ /* Remove any pending events for this subscription */
++ for (i = 0; i < sev->in_use; i++) {
++ list_del(&sev->events[sev_pos(sev, i)].list);
++ fh->navailable--;
++ }
++ list_del(&sev->list);
++}
++
+ int v4l2_event_subscribe(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub, unsigned elems,
+ const struct v4l2_subscribed_event_ops *ops)
+@@ -225,27 +241,23 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
++ if (!found_ev)
++ list_add(&sev->list, &fh->subscribed);
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+
+ if (found_ev) {
+ /* Already listening */
+ kvfree(sev);
+- goto out_unlock;
+- }
+-
+- if (sev->ops && sev->ops->add) {
++ } else if (sev->ops && sev->ops->add) {
+ ret = sev->ops->add(sev, elems);
+ if (ret) {
++ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
++ __v4l2_event_unsubscribe(sev);
++ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+ kvfree(sev);
+- goto out_unlock;
+ }
+ }
+
+- spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+- list_add(&sev->list, &fh->subscribed);
+- spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+-
+-out_unlock:
+ mutex_unlock(&fh->subscribe_lock);
+
+ return ret;
+@@ -280,7 +292,6 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
+ {
+ struct v4l2_subscribed_event *sev;
+ unsigned long flags;
+- int i;
+
+ if (sub->type == V4L2_EVENT_ALL) {
+ v4l2_event_unsubscribe_all(fh);
+@@ -292,14 +303,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+
+ sev = v4l2_event_subscribed(fh, sub->type, sub->id);
+- if (sev != NULL) {
+- /* Remove any pending events for this subscription */
+- for (i = 0; i < sev->in_use; i++) {
+- list_del(&sev->events[sev_pos(sev, i)].list);
+- fh->navailable--;
+- }
+- list_del(&sev->list);
+- }
++ if (sev != NULL)
++ __v4l2_event_unsubscribe(sev);
+
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+
+diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
+index b2a0340f277e..d8e3cc2dc747 100644
+--- a/drivers/misc/atmel-ssc.c
++++ b/drivers/misc/atmel-ssc.c
+@@ -132,7 +132,7 @@ static const struct of_device_id atmel_ssc_dt_ids[] = {
+ MODULE_DEVICE_TABLE(of, atmel_ssc_dt_ids);
+ #endif
+
+-static inline const struct atmel_ssc_platform_data * __init
++static inline const struct atmel_ssc_platform_data *
+ atmel_ssc_get_driver_data(struct platform_device *pdev)
+ {
+ if (pdev->dev.of_node) {
+diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c
+index 313da3150262..1540a7785e14 100644
+--- a/drivers/misc/sgi-gru/grukdump.c
++++ b/drivers/misc/sgi-gru/grukdump.c
+@@ -27,6 +27,9 @@
+ #include <linux/delay.h>
+ #include <linux/bitops.h>
+ #include <asm/uv/uv_hub.h>
++
++#include <linux/nospec.h>
++
+ #include "gru.h"
+ #include "grutables.h"
+ #include "gruhandles.h"
+@@ -196,6 +199,7 @@ int gru_dump_chiplet_request(unsigned long arg)
+ /* Currently, only dump by gid is implemented */
+ if (req.gid >= gru_max_gids)
+ return -EINVAL;
++ req.gid = array_index_nospec(req.gid, gru_max_gids);
+
+ gru = GID_TO_GRU(req.gid);
+ ubuf = req.buf;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 04dbf64fb1cb..176c99b8251d 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -9688,6 +9688,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_PARTIAL |
++ NETIF_F_GSO_IPXIP4 |
++ NETIF_F_GSO_IPXIP6 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_SCTP_CRC |
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+index 112d24c6c9ce..4904a63b83ef 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+@@ -760,8 +760,10 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
+ ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
+ adapter->default_up, vf);
+
+- if (vfinfo->spoofchk_enabled)
++ if (vfinfo->spoofchk_enabled) {
+ hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
++ hw->mac.ops.set_mac_anti_spoofing(hw, true, vf);
++ }
+ }
+
+ /* reset multicast table array for vf */
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
+index ab4ad8a1e2a5..01a213d4ee9c 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
+@@ -167,6 +167,9 @@ struct qed_spq_entry {
+ enum spq_mode comp_mode;
+ struct qed_spq_comp_cb comp_cb;
+ struct qed_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */
++
++ /* Posted entry for unlimited list entry in EBLOCK mode */
++ struct qed_spq_entry *post_ent;
+ };
+
+ struct qed_eq {
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+index d7c5965328be..b26578464469 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+@@ -80,7 +80,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
+
+ case QED_SPQ_MODE_BLOCK:
+ if (!p_data->p_comp_data)
+- return -EINVAL;
++ goto err;
+
+ p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
+ break;
+@@ -95,7 +95,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
+ default:
+ DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
+ p_ent->comp_mode);
+- return -EINVAL;
++ goto err;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+@@ -109,6 +109,18 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
+ memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));
+
+ return 0;
++
++err:
++ /* qed_spq_get_entry() can either get an entry from the free_pool,
++ * or, if no entries are left, allocate a new entry and add it to
++ * the unlimited_pending list.
++ */
++ if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending)
++ kfree(p_ent);
++ else
++ qed_spq_return_entry(p_hwfn, p_ent);
++
++ return -EINVAL;
+ }
+
+ static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type)
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
+index be48d9abd001..467755b6dd0b 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
+@@ -144,6 +144,7 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
+
+ DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
+ rc = qed_mcp_drain(p_hwfn, p_ptt);
++ qed_ptt_release(p_hwfn, p_ptt);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "MCP drain failed\n");
+ goto err;
+@@ -152,18 +153,15 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
+ /* Retry after drain */
+ rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
+ if (!rc)
+- goto out;
++ return 0;
+
+ comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
+- if (comp_done->done == 1)
++ if (comp_done->done == 1) {
+ if (p_fw_ret)
+ *p_fw_ret = comp_done->fw_return_code;
+-out:
+- qed_ptt_release(p_hwfn, p_ptt);
+- return 0;
+-
++ return 0;
++ }
+ err:
+- qed_ptt_release(p_hwfn, p_ptt);
+ DP_NOTICE(p_hwfn,
+ "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
+ le32_to_cpu(p_ent->elem.hdr.cid),
+@@ -687,6 +685,8 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
+ /* EBLOCK responsible to free the allocated p_ent */
+ if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
+ kfree(p_ent);
++ else
++ p_ent->post_ent = p_en2;
+
+ p_ent = p_en2;
+ }
+@@ -770,6 +770,25 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
+ SPQ_HIGH_PRI_RESERVE_DEFAULT);
+ }
+
++/* Avoid overriding of SPQ entries when getting out-of-order completions, by
++ * marking the completions in a bitmap and increasing the chain consumer only
++ * for the first successive completed entries.
++ */
++static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
++{
++ u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
++ struct qed_spq *p_spq = p_hwfn->p_spq;
++
++ __set_bit(pos, p_spq->p_comp_bitmap);
++ while (test_bit(p_spq->comp_bitmap_idx,
++ p_spq->p_comp_bitmap)) {
++ __clear_bit(p_spq->comp_bitmap_idx,
++ p_spq->p_comp_bitmap);
++ p_spq->comp_bitmap_idx++;
++ qed_chain_return_produced(&p_spq->chain);
++ }
++}
++
+ int qed_spq_post(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent, u8 *fw_return_code)
+ {
+@@ -821,11 +840,12 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
+ p_ent->queue == &p_spq->unlimited_pending);
+
+ if (p_ent->queue == &p_spq->unlimited_pending) {
+- /* This is an allocated p_ent which does not need to
+- * return to pool.
+- */
++ struct qed_spq_entry *p_post_ent = p_ent->post_ent;
++
+ kfree(p_ent);
+- return rc;
++
++ /* Return the entry which was actually posted */
++ p_ent = p_post_ent;
+ }
+
+ if (rc)
+@@ -839,7 +859,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
+ spq_post_fail2:
+ spin_lock_bh(&p_spq->lock);
+ list_del(&p_ent->list);
+- qed_chain_return_produced(&p_spq->chain);
++ qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);
+
+ spq_post_fail:
+ /* return to the free pool */
+@@ -871,25 +891,8 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
+ spin_lock_bh(&p_spq->lock);
+ list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
+ if (p_ent->elem.hdr.echo == echo) {
+- u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
+-
+ list_del(&p_ent->list);
+-
+- /* Avoid overriding of SPQ entries when getting
+- * out-of-order completions, by marking the completions
+- * in a bitmap and increasing the chain consumer only
+- * for the first successive completed entries.
+- */
+- __set_bit(pos, p_spq->p_comp_bitmap);
+-
+- while (test_bit(p_spq->comp_bitmap_idx,
+- p_spq->p_comp_bitmap)) {
+- __clear_bit(p_spq->comp_bitmap_idx,
+- p_spq->p_comp_bitmap);
+- p_spq->comp_bitmap_idx++;
+- qed_chain_return_produced(&p_spq->chain);
+- }
+-
++ qed_spq_comp_bmap_update(p_hwfn, echo);
+ p_spq->comp_count++;
+ found = p_ent;
+ break;
+@@ -928,11 +931,9 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
+ QED_MSG_SPQ,
+ "Got a completion without a callback function\n");
+
+- if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
+- (found->queue == &p_spq->unlimited_pending))
++ if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
+ /* EBLOCK is responsible for returning its own entry into the
+- * free list, unless it originally added the entry into the
+- * unlimited pending list.
++ * free list.
+ */
+ qed_spq_return_entry(p_hwfn, found);
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
+index 627fec210e2f..8e2a19616bc9 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/common.h
++++ b/drivers/net/ethernet/stmicro/stmmac/common.h
+@@ -340,7 +340,8 @@ struct dma_features {
+
+ /* GMAC TX FIFO is 8K, Rx FIFO is 16K */
+ #define BUF_SIZE_16KiB 16384
+-#define BUF_SIZE_8KiB 8192
++/* RX Buffer size must be < 8191 and multiple of 4/8/16 bytes */
++#define BUF_SIZE_8KiB 8188
+ #define BUF_SIZE_4KiB 4096
+ #define BUF_SIZE_2KiB 2048
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+index ca9d7e48034c..40d6356a7e73 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
++++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+@@ -31,7 +31,7 @@
+ /* Enhanced descriptors */
+ static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
+ {
+- p->des1 |= cpu_to_le32(((BUF_SIZE_8KiB - 1)
++ p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
+ << ERDES1_BUFFER2_SIZE_SHIFT)
+ & ERDES1_BUFFER2_SIZE_MASK);
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+index 2a828a312814..acd65a4f94d4 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
++++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+@@ -262,7 +262,7 @@ static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
+ int mode, int end)
+ {
+ p->des0 |= cpu_to_le32(RDES0_OWN);
+- p->des1 |= cpu_to_le32((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK);
++ p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK);
+
+ if (mode == STMMAC_CHAIN_MODE)
+ ehn_desc_rx_set_on_chain(p);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+index 28e4b5d50ce6..1af7b078b94d 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
++++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+@@ -143,7 +143,7 @@ static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
+ static int stmmac_set_16kib_bfsize(int mtu)
+ {
+ int ret = 0;
+- if (unlikely(mtu >= BUF_SIZE_8KiB))
++ if (unlikely(mtu > BUF_SIZE_8KiB))
+ ret = BUF_SIZE_16KiB;
+ return ret;
+ }
+diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
+index ea22591ee66f..53dfe67807e3 100644
+--- a/drivers/platform/x86/acerhdf.c
++++ b/drivers/platform/x86/acerhdf.c
+@@ -233,6 +233,7 @@ static const struct bios_settings bios_tbl[] = {
+ {"Gateway", "LT31", "v1.3201", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Gateway", "LT31", "v1.3302", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Gateway", "LT31", "v1.3303t", 0x55, 0x58, {0x9e, 0x00}, 0},
++ {"Gateway", "LT31", "v1.3307", 0x55, 0x58, {0x9e, 0x00}, 0},
+ /* Packard Bell */
+ {"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x00}, 0},
+ {"Packard Bell", "DOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00}, 0},
+diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c
+index d4fc42b4cbeb..401bdc7a9d94 100644
+--- a/drivers/platform/x86/intel_telemetry_debugfs.c
++++ b/drivers/platform/x86/intel_telemetry_debugfs.c
+@@ -968,12 +968,16 @@ static int __init telemetry_debugfs_init(void)
+ debugfs_conf = (struct telemetry_debugfs_conf *)id->driver_data;
+
+ err = telemetry_pltconfig_valid();
+- if (err < 0)
++ if (err < 0) {
++ pr_info("Invalid pltconfig, ensure IPC1 device is enabled in BIOS\n");
+ return -ENODEV;
++ }
+
+ err = telemetry_debugfs_check_evts();
+- if (err < 0)
++ if (err < 0) {
++ pr_info("telemetry_debugfs_check_evts failed\n");
+ return -EINVAL;
++ }
+
+ register_pm_notifier(&pm_notifier);
+
+diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
+index cd73172bff47..a19f2dc69e8a 100644
+--- a/drivers/s390/net/qeth_l3_main.c
++++ b/drivers/s390/net/qeth_l3_main.c
+@@ -358,9 +358,6 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
+
+ QETH_CARD_TEXT(card, 4, "clearip");
+
+- if (recover && card->options.sniffer)
+- return;
+-
+ spin_lock_bh(&card->ip_lock);
+
+ hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
+@@ -818,6 +815,8 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card,
+ int rc = 0;
+ int cnt = 3;
+
++ if (card->options.sniffer)
++ return 0;
+
+ if (addr->proto == QETH_PROT_IPV4) {
+ QETH_CARD_TEXT(card, 2, "setaddr4");
+@@ -853,6 +852,9 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
+ {
+ int rc = 0;
+
++ if (card->options.sniffer)
++ return 0;
++
+ if (addr->proto == QETH_PROT_IPV4) {
+ QETH_CARD_TEXT(card, 2, "deladdr4");
+ QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
+diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
+index 0a730136646d..654579bc1e54 100644
+--- a/drivers/uio/uio.c
++++ b/drivers/uio/uio.c
+@@ -850,6 +850,8 @@ int __uio_register_device(struct module *owner,
+ if (ret)
+ goto err_uio_dev_add_attributes;
+
++ info->uio_dev = idev;
++
+ if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
+ /*
+ * Note that we deliberately don't use devm_request_irq
+@@ -861,11 +863,12 @@ int __uio_register_device(struct module *owner,
+ */
+ ret = request_irq(info->irq, uio_interrupt,
+ info->irq_flags, info->name, idev);
+- if (ret)
++ if (ret) {
++ info->uio_dev = NULL;
+ goto err_request_irq;
++ }
+ }
+
+- info->uio_dev = idev;
+ return 0;
+
+ err_request_irq:
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index e41d00bc7e97..5a8ef83a5c5c 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1724,6 +1724,9 @@ static const struct usb_device_id acm_ids[] = {
+ { USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */
+ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+ },
++ { USB_DEVICE(0x0572, 0x1349), /* Hiro (Conexant) USB MODEM H50228 */
++ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
++ },
+ { USB_DEVICE(0x20df, 0x0001), /* Simtec Electronics Entropy Key */
+ .driver_info = QUIRK_CONTROL_LINE_STATE, },
+ { USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 37a5e07b3488..1e8f68960014 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -243,6 +243,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT |
+ USB_QUIRK_DELAY_CTRL_MSG },
+
++ /* Corsair K70 LUX RGB */
++ { USB_DEVICE(0x1b1c, 0x1b33), .driver_info = USB_QUIRK_DELAY_INIT },
++
+ /* Corsair K70 LUX */
+ { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT },
+
+@@ -263,6 +266,11 @@ static const struct usb_device_id usb_quirk_list[] = {
+ { USB_DEVICE(0x2040, 0x7200), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
++ /* Raydium Touchscreen */
++ { USB_DEVICE(0x2386, 0x3114), .driver_info = USB_QUIRK_NO_LPM },
++
++ { USB_DEVICE(0x2386, 0x3119), .driver_info = USB_QUIRK_NO_LPM },
++
+ /* DJI CineSSD */
+ { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
+
+diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
+index 8efdc500e790..288fe3e69d52 100644
+--- a/drivers/usb/misc/appledisplay.c
++++ b/drivers/usb/misc/appledisplay.c
+@@ -63,6 +63,7 @@ static const struct usb_device_id appledisplay_table[] = {
+ { APPLEDISPLAY_DEVICE(0x9219) },
+ { APPLEDISPLAY_DEVICE(0x921c) },
+ { APPLEDISPLAY_DEVICE(0x921d) },
++ { APPLEDISPLAY_DEVICE(0x9222) },
+ { APPLEDISPLAY_DEVICE(0x9236) },
+
+ /* Terminating entry */
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index 44a7b2dea688..c5fd5abf7206 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -933,8 +933,8 @@ static int cifs_clone_file_range(struct file *src_file, loff_t off,
+ struct inode *src_inode = file_inode(src_file);
+ struct inode *target_inode = file_inode(dst_file);
+ struct cifsFileInfo *smb_file_src = src_file->private_data;
+- struct cifsFileInfo *smb_file_target = dst_file->private_data;
+- struct cifs_tcon *target_tcon = tlink_tcon(smb_file_target->tlink);
++ struct cifsFileInfo *smb_file_target;
++ struct cifs_tcon *target_tcon;
+ unsigned int xid;
+ int rc;
+
+@@ -948,6 +948,9 @@ static int cifs_clone_file_range(struct file *src_file, loff_t off,
+ goto out;
+ }
+
++ smb_file_target = dst_file->private_data;
++ target_tcon = tlink_tcon(smb_file_target->tlink);
++
+ /*
+ * Note: cifs case is easier than btrfs since server responsible for
+ * checks for proper open modes and file type and if it wants
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 4e5b05263e4a..3372eedaa94d 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -441,6 +441,7 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size,
+ int rc = 0;
+ unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
+ char *name, *value;
++ size_t buf_size = dst_size;
+ size_t name_len, value_len, user_name_len;
+
+ while (src_size > 0) {
+@@ -476,9 +477,10 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size,
+ /* 'user.' plus a terminating null */
+ user_name_len = 5 + 1 + name_len;
+
+- rc += user_name_len;
+-
+- if (dst_size >= user_name_len) {
++ if (buf_size == 0) {
++ /* skip copy - calc size only */
++ rc += user_name_len;
++ } else if (dst_size >= user_name_len) {
+ dst_size -= user_name_len;
+ memcpy(dst, "user.", 5);
+ dst += 5;
+@@ -486,8 +488,7 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size,
+ dst += name_len;
+ *dst = 0;
+ ++dst;
+- } else if (dst_size == 0) {
+- /* skip copy - calc size only */
++ rc += user_name_len;
+ } else {
+ /* stop before overrun buffer */
+ rc = -ERANGE;
+diff --git a/fs/exofs/super.c b/fs/exofs/super.c
+index 819624cfc8da..c9ec652e2fcd 100644
+--- a/fs/exofs/super.c
++++ b/fs/exofs/super.c
+@@ -100,6 +100,7 @@ static int parse_options(char *options, struct exofs_mountopt *opts)
+ token = match_token(p, tokens, args);
+ switch (token) {
+ case Opt_name:
++ kfree(opts->dev_name);
+ opts->dev_name = match_strdup(&args[0]);
+ if (unlikely(!opts->dev_name)) {
+ EXOFS_ERR("Error allocating dev_name");
+@@ -863,8 +864,10 @@ static struct dentry *exofs_mount(struct file_system_type *type,
+ int ret;
+
+ ret = parse_options(data, &opts);
+- if (ret)
++ if (ret) {
++ kfree(opts.dev_name);
+ return ERR_PTR(ret);
++ }
+
+ if (!opts.dev_name)
+ opts.dev_name = dev_name;
+diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
+index 9a8772465a90..da25c49203cc 100644
+--- a/fs/hfs/brec.c
++++ b/fs/hfs/brec.c
+@@ -425,6 +425,10 @@ skip:
+ if (new_node) {
+ __be32 cnid;
+
++ if (!new_node->parent) {
++ hfs_btree_inc_height(tree);
++ new_node->parent = tree->root;
++ }
+ fd->bnode = hfs_bnode_find(tree, new_node->parent);
+ /* create index key and entry */
+ hfs_bnode_read_key(new_node, fd->search_key, 14);
+diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c
+index 808f4d8c859c..d3f36982f685 100644
+--- a/fs/hfsplus/brec.c
++++ b/fs/hfsplus/brec.c
+@@ -428,6 +428,10 @@ skip:
+ if (new_node) {
+ __be32 cnid;
+
++ if (!new_node->parent) {
++ hfs_btree_inc_height(tree);
++ new_node->parent = tree->root;
++ }
+ fd->bnode = hfs_bnode_find(tree, new_node->parent);
+ /* create index key and entry */
+ hfs_bnode_read_key(new_node, fd->search_key, 14);
+diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
+index 505f87a8c724..83423192588c 100644
+--- a/fs/reiserfs/xattr.c
++++ b/fs/reiserfs/xattr.c
+@@ -185,6 +185,7 @@ struct reiserfs_dentry_buf {
+ struct dir_context ctx;
+ struct dentry *xadir;
+ int count;
++ int err;
+ struct dentry *dentries[8];
+ };
+
+@@ -207,6 +208,7 @@ fill_with_dentries(struct dir_context *ctx, const char *name, int namelen,
+
+ dentry = lookup_one_len(name, dbuf->xadir, namelen);
+ if (IS_ERR(dentry)) {
++ dbuf->err = PTR_ERR(dentry);
+ return PTR_ERR(dentry);
+ } else if (d_really_is_negative(dentry)) {
+ /* A directory entry exists, but no file? */
+@@ -215,6 +217,7 @@ fill_with_dentries(struct dir_context *ctx, const char *name, int namelen,
+ "not found for file %pd.\n",
+ dentry, dbuf->xadir);
+ dput(dentry);
++ dbuf->err = -EIO;
+ return -EIO;
+ }
+
+@@ -262,6 +265,10 @@ static int reiserfs_for_each_xattr(struct inode *inode,
+ err = reiserfs_readdir_inode(d_inode(dir), &buf.ctx);
+ if (err)
+ break;
++ if (buf.err) {
++ err = buf.err;
++ break;
++ }
+ if (!buf.count)
+ break;
+ for (i = 0; !err && i < buf.count && buf.dentries[i]; i++) {
+diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
+index 8e42253e5d4d..91a533bd3eb1 100644
+--- a/include/linux/netfilter/ipset/ip_set.h
++++ b/include/linux/netfilter/ipset/ip_set.h
+@@ -312,7 +312,7 @@ enum {
+ extern ip_set_id_t ip_set_get_byname(struct net *net,
+ const char *name, struct ip_set **set);
+ extern void ip_set_put_byindex(struct net *net, ip_set_id_t index);
+-extern const char *ip_set_name_byindex(struct net *net, ip_set_id_t index);
++extern void ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name);
+ extern ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index);
+ extern void ip_set_nfnl_put(struct net *net, ip_set_id_t index);
+
+diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h
+index 8e2bab1e8e90..70877f8de7e9 100644
+--- a/include/linux/netfilter/ipset/ip_set_comment.h
++++ b/include/linux/netfilter/ipset/ip_set_comment.h
+@@ -43,11 +43,11 @@ ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment,
+ rcu_assign_pointer(comment->c, c);
+ }
+
+-/* Used only when dumping a set, protected by rcu_read_lock_bh() */
++/* Used only when dumping a set, protected by rcu_read_lock() */
+ static inline int
+ ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment)
+ {
+- struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c);
++ struct ip_set_comment_rcu *c = rcu_dereference(comment->c);
+
+ if (!c)
+ return 0;
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 4e89ed8a0fb2..3bc664662081 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -5733,14 +5733,17 @@ void __init sched_init_smp(void)
+ /*
+ * There's no userspace yet to cause hotplug operations; hence all the
+ * CPU masks are stable and all blatant races in the below code cannot
+- * happen.
++ * happen. The hotplug lock is nevertheless taken to satisfy lockdep,
++ * but there won't be any contention on it.
+ */
++ cpus_read_lock();
+ mutex_lock(&sched_domains_mutex);
+ sched_init_domains(cpu_active_mask);
+ cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
+ if (cpumask_empty(non_isolated_cpus))
+ cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
+ mutex_unlock(&sched_domains_mutex);
++ cpus_read_unlock();
+
+ /* Move init over to a non-isolated CPU */
+ if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
+diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
+index be1010bdc435..565a77220fae 100644
+--- a/lib/raid6/test/Makefile
++++ b/lib/raid6/test/Makefile
+@@ -27,7 +27,7 @@ ifeq ($(ARCH),arm)
+ CFLAGS += -I../../../arch/arm/include -mfpu=neon
+ HAS_NEON = yes
+ endif
+-ifeq ($(ARCH),arm64)
++ifeq ($(ARCH),aarch64)
+ CFLAGS += -I../../../arch/arm64/include
+ HAS_NEON = yes
+ endif
+@@ -41,7 +41,7 @@ ifeq ($(IS_X86),yes)
+ gcc -c -x assembler - >&/dev/null && \
+ rm ./-.o && echo -DCONFIG_AS_AVX512=1)
+ else ifeq ($(HAS_NEON),yes)
+- OBJS += neon.o neon1.o neon2.o neon4.o neon8.o
++ OBJS += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
+ CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1
+ else
+ HAS_ALTIVEC := $(shell printf '\#include <altivec.h>\nvector int a;\n' |\
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index 5c4e85296cf6..5281da82371a 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -594,9 +594,15 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
+ struct bio_vec bvec;
+ int ret;
+
+- /* sendpage cannot properly handle pages with page_count == 0,
+- * we need to fallback to sendmsg if that's the case */
+- if (page_count(page) >= 1)
++ /*
++ * sendpage cannot properly handle pages with page_count == 0,
++ * we need to fall back to sendmsg if that's the case.
++ *
++ * Same goes for slab pages: skb_can_coalesce() allows
++ * coalescing neighboring slab objects into a single frag which
++ * triggers one of hardened usercopy checks.
++ */
++ if (page_count(page) >= 1 && !PageSlab(page))
+ return __ceph_tcp_sendpage(sock, page, offset, size, more);
+
+ bvec.bv_page = page;
+diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
+index 9d2ce1459cec..a3f1dc7cf538 100644
+--- a/net/netfilter/ipset/ip_set_core.c
++++ b/net/netfilter/ipset/ip_set_core.c
+@@ -668,21 +668,20 @@ ip_set_put_byindex(struct net *net, ip_set_id_t index)
+ EXPORT_SYMBOL_GPL(ip_set_put_byindex);
+
+ /* Get the name of a set behind a set index.
+- * We assume the set is referenced, so it does exist and
+- * can't be destroyed. The set cannot be renamed due to
+- * the referencing either.
+- *
++ * Set itself is protected by RCU, but its name isn't: to protect against
++ * renaming, grab ip_set_ref_lock as reader (see ip_set_rename()) and copy the
++ * name.
+ */
+-const char *
+-ip_set_name_byindex(struct net *net, ip_set_id_t index)
++void
++ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name)
+ {
+- const struct ip_set *set = ip_set_rcu_get(net, index);
++ struct ip_set *set = ip_set_rcu_get(net, index);
+
+ BUG_ON(!set);
+- BUG_ON(set->ref == 0);
+
+- /* Referenced, so it's safe */
+- return set->name;
++ read_lock_bh(&ip_set_ref_lock);
++ strncpy(name, set->name, IPSET_MAXNAMELEN);
++ read_unlock_bh(&ip_set_ref_lock);
+ }
+ EXPORT_SYMBOL_GPL(ip_set_name_byindex);
+
+@@ -1128,7 +1127,7 @@ static int ip_set_rename(struct net *net, struct sock *ctnl,
+ if (!set)
+ return -ENOENT;
+
+- read_lock_bh(&ip_set_ref_lock);
++ write_lock_bh(&ip_set_ref_lock);
+ if (set->ref != 0) {
+ ret = -IPSET_ERR_REFERENCED;
+ goto out;
+@@ -1145,7 +1144,7 @@ static int ip_set_rename(struct net *net, struct sock *ctnl,
+ strncpy(set->name, name2, IPSET_MAXNAMELEN);
+
+ out:
+- read_unlock_bh(&ip_set_ref_lock);
++ write_unlock_bh(&ip_set_ref_lock);
+ return ret;
+ }
+
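The ip_set_core.c change pairs a write-locked rename with a read-locked copy into a caller-supplied buffer, so ip_set_name_byindex() can never hand back a pointer that a concurrent IPSET_CMD_RENAME invalidates. The sketch below is a plain pthreads analog of that split, using a rwlock where the kernel uses ip_set_ref_lock; names and sizes are hypothetical.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define NAME_MAX_LEN 32		/* stands in for IPSET_MAXNAMELEN */

static pthread_rwlock_t name_lock = PTHREAD_RWLOCK_INITIALIZER;
static char set_name[NAME_MAX_LEN] = "clients";

/* Reader side: copy the name out under the read lock instead of
 * returning a pointer that a concurrent rename could invalidate. */
static void get_name(char *out)
{
	pthread_rwlock_rdlock(&name_lock);
	strncpy(out, set_name, NAME_MAX_LEN);
	out[NAME_MAX_LEN - 1] = '\0';
	pthread_rwlock_unlock(&name_lock);
}

/* Writer side: renames are serialized against all readers. */
static void rename_set(const char *new_name)
{
	pthread_rwlock_wrlock(&name_lock);
	strncpy(set_name, new_name, NAME_MAX_LEN - 1);
	set_name[NAME_MAX_LEN - 1] = '\0';
	pthread_rwlock_unlock(&name_lock);
}

int main(void)
{
	char copy[NAME_MAX_LEN];

	get_name(copy);
	printf("before: %s\n", copy);
	rename_set("clients-v2");
	get_name(copy);
	printf("after:  %s\n", copy);
	return 0;
}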
+diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
+index 8602f2595a1a..0e6e40c6f652 100644
+--- a/net/netfilter/ipset/ip_set_hash_netportnet.c
++++ b/net/netfilter/ipset/ip_set_hash_netportnet.c
+@@ -213,13 +213,13 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+
+ if (tb[IPSET_ATTR_CIDR]) {
+ e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+- if (!e.cidr[0] || e.cidr[0] > HOST_MASK)
++ if (e.cidr[0] > HOST_MASK)
+ return -IPSET_ERR_INVALID_CIDR;
+ }
+
+ if (tb[IPSET_ATTR_CIDR2]) {
+ e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+- if (!e.cidr[1] || e.cidr[1] > HOST_MASK)
++ if (e.cidr[1] > HOST_MASK)
+ return -IPSET_ERR_INVALID_CIDR;
+ }
+
+@@ -492,13 +492,13 @@ hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
+
+ if (tb[IPSET_ATTR_CIDR]) {
+ e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+- if (!e.cidr[0] || e.cidr[0] > HOST_MASK)
++ if (e.cidr[0] > HOST_MASK)
+ return -IPSET_ERR_INVALID_CIDR;
+ }
+
+ if (tb[IPSET_ATTR_CIDR2]) {
+ e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+- if (!e.cidr[1] || e.cidr[1] > HOST_MASK)
++ if (e.cidr[1] > HOST_MASK)
+ return -IPSET_ERR_INVALID_CIDR;
+ }
+
+diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
+index 178d4eba013b..75d52aed6fdb 100644
+--- a/net/netfilter/ipset/ip_set_list_set.c
++++ b/net/netfilter/ipset/ip_set_list_set.c
+@@ -156,9 +156,7 @@ __list_set_del_rcu(struct rcu_head * rcu)
+ {
+ struct set_elem *e = container_of(rcu, struct set_elem, rcu);
+ struct ip_set *set = e->set;
+- struct list_set *map = set->data;
+
+- ip_set_put_byindex(map->net, e->id);
+ ip_set_ext_destroy(set, e);
+ kfree(e);
+ }
+@@ -166,15 +164,21 @@ __list_set_del_rcu(struct rcu_head * rcu)
+ static inline void
+ list_set_del(struct ip_set *set, struct set_elem *e)
+ {
++ struct list_set *map = set->data;
++
+ set->elements--;
+ list_del_rcu(&e->list);
++ ip_set_put_byindex(map->net, e->id);
+ call_rcu(&e->rcu, __list_set_del_rcu);
+ }
+
+ static inline void
+-list_set_replace(struct set_elem *e, struct set_elem *old)
++list_set_replace(struct ip_set *set, struct set_elem *e, struct set_elem *old)
+ {
++ struct list_set *map = set->data;
++
+ list_replace_rcu(&old->list, &e->list);
++ ip_set_put_byindex(map->net, old->id);
+ call_rcu(&old->rcu, __list_set_del_rcu);
+ }
+
+@@ -306,7 +310,7 @@ list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
+ INIT_LIST_HEAD(&e->list);
+ list_set_init_extensions(set, ext, e);
+ if (n)
+- list_set_replace(e, n);
++ list_set_replace(set, e, n);
+ else if (next)
+ list_add_tail_rcu(&e->list, &next->list);
+ else if (prev)
+@@ -497,6 +501,7 @@ list_set_list(const struct ip_set *set,
+ const struct list_set *map = set->data;
+ struct nlattr *atd, *nested;
+ u32 i = 0, first = cb->args[IPSET_CB_ARG0];
++ char name[IPSET_MAXNAMELEN];
+ struct set_elem *e;
+ int ret = 0;
+
+@@ -515,8 +520,8 @@ list_set_list(const struct ip_set *set,
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested)
+ goto nla_put_failure;
+- if (nla_put_string(skb, IPSET_ATTR_NAME,
+- ip_set_name_byindex(map->net, e->id)))
++ ip_set_name_byindex(map->net, e->id, name);
++ if (nla_put_string(skb, IPSET_ATTR_NAME, name))
+ goto nla_put_failure;
+ if (ip_set_put_extensions(skb, set, e, true))
+ goto nla_put_failure;
+diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
+index 1141f08810b6..3fef8c2e545d 100644
+--- a/net/netfilter/xt_IDLETIMER.c
++++ b/net/netfilter/xt_IDLETIMER.c
+@@ -116,6 +116,22 @@ static void idletimer_tg_expired(unsigned long data)
+ schedule_work(&timer->work);
+ }
+
++static int idletimer_check_sysfs_name(const char *name, unsigned int size)
++{
++ int ret;
++
++ ret = xt_check_proc_name(name, size);
++ if (ret < 0)
++ return ret;
++
++ if (!strcmp(name, "power") ||
++ !strcmp(name, "subsystem") ||
++ !strcmp(name, "uevent"))
++ return -EINVAL;
++
++ return 0;
++}
++
+ static int idletimer_tg_create(struct idletimer_tg_info *info)
+ {
+ int ret;
+@@ -126,6 +142,10 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
+ goto out;
+ }
+
++ ret = idletimer_check_sysfs_name(info->label, sizeof(info->label));
++ if (ret < 0)
++ goto out_free_timer;
++
+ sysfs_attr_init(&info->timer->attr.attr);
+ info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
+ if (!info->timer->attr.attr.name) {
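The xt_IDLETIMER change rejects labels that would collide with the sysfs attributes every device directory already exposes. Below is a rough userspace analog of that validation with a made-up check_label() helper and an illustrative buffer size; the real code additionally relies on xt_check_proc_name() for the generic name checks.

#include <stdio.h>
#include <string.h>
#include <errno.h>

#define LABEL_SIZE 28	/* illustrative buffer size, not the kernel's constant */

/* The timer label becomes a sysfs file name, so it must not be empty,
 * must fit the buffer, must not contain '/', and must not shadow the
 * attribute names every sysfs device node already has. */
static int check_label(const char *name, size_t size)
{
	static const char * const reserved[] = { "power", "subsystem", "uevent" };
	size_t len = strnlen(name, size);

	if (len == 0 || len == size)
		return -EINVAL;		/* empty or unterminated */
	if (strchr(name, '/'))
		return -EINVAL;		/* would escape the directory */
	for (size_t i = 0; i < sizeof(reserved) / sizeof(reserved[0]); i++)
		if (!strcmp(name, reserved[i]))
			return -EINVAL;
	return 0;
}

int main(void)
{
	const char *labels[] = { "mytimer", "uevent", "a/b" };

	for (int i = 0; i < 3; i++)
		printf("%-8s -> %d\n", labels[i], check_label(labels[i], LABEL_SIZE));
	return 0;
}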
+diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
+index 13695ba8fc54..4f382805eb9c 100644
+--- a/net/sunrpc/xdr.c
++++ b/net/sunrpc/xdr.c
+@@ -512,7 +512,7 @@ EXPORT_SYMBOL_GPL(xdr_commit_encode);
+ static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
+ size_t nbytes)
+ {
+- static __be32 *p;
++ __be32 *p;
+ int space_left;
+ int frag1bytes, frag2bytes;
+
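The sunrpc fix is a single keyword: "static" made the local pointer one function-lifetime slot shared by every concurrent encoder instead of per-call state. The contrived, single-threaded analogy below only illustrates that sharing; in the kernel the clobbering needs two RPC encoders running at once, and all names here are invented.

#include <stdio.h>

/* What "static __be32 *p" amounts to: a single shared slot with
 * function-wide lifetime rather than a variable on each caller's
 * stack. */
static int *shared_p;

static void begin_encode_shared(int *buf) { shared_p = buf; }
static int *finish_encode_shared(void)    { return shared_p; }

int main(void)
{
	int reply_a[4] = { 0 }, reply_b[4] = { 0 };

	begin_encode_shared(reply_a);
	/* A second request starts encoding before the first finishes: */
	begin_encode_shared(reply_b);

	/* The first request now resolves its pointer into the wrong
	 * reply buffer.  An ordinary automatic variable has no shared
	 * slot at all, so each call keeps its own pointer. */
	int *p = finish_encode_shared();
	printf("first request writes into %s buffer\n",
	       p == reply_a ? "its own" : "the OTHER request's");
	return 0;
}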
+diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
+index 08ca26bcca77..451654372a76 100644
+--- a/security/apparmor/lib.c
++++ b/security/apparmor/lib.c
+@@ -90,10 +90,12 @@ const char *aa_splitn_fqname(const char *fqname, size_t n, const char **ns_name,
+ const char *end = fqname + n;
+ const char *name = skipn_spaces(fqname, n);
+
+- if (!name)
+- return NULL;
+ *ns_name = NULL;
+ *ns_len = 0;
++
++ if (!name)
++ return NULL;
++
+ if (name[0] == ':') {
+ char *split = strnchr(&name[1], end - &name[1], ':');
+ *ns_name = skipn_spaces(&name[1], end - &name[1]);
+diff --git a/tools/perf/jvmti/jvmti_agent.c b/tools/perf/jvmti/jvmti_agent.c
+index c1d20d951434..4ad9948fe594 100644
+--- a/tools/perf/jvmti/jvmti_agent.c
++++ b/tools/perf/jvmti/jvmti_agent.c
+@@ -125,7 +125,7 @@ perf_get_timestamp(void)
+ }
+
+ static int
+-debug_cache_init(void)
++create_jit_cache_dir(void)
+ {
+ char str[32];
+ char *base, *p;
+@@ -144,8 +144,13 @@ debug_cache_init(void)
+
+ strftime(str, sizeof(str), JIT_LANG"-jit-%Y%m%d", &tm);
+
+- snprintf(jit_path, PATH_MAX - 1, "%s/.debug/", base);
+-
++ ret = snprintf(jit_path, PATH_MAX, "%s/.debug/", base);
++ if (ret >= PATH_MAX) {
++ warnx("jvmti: cannot generate jit cache dir because %s/.debug/"
++ " is too long, please check the cwd, JITDUMPDIR, and"
++ " HOME variables", base);
++ return -1;
++ }
+ ret = mkdir(jit_path, 0755);
+ if (ret == -1) {
+ if (errno != EEXIST) {
+@@ -154,20 +159,32 @@ debug_cache_init(void)
+ }
+ }
+
+- snprintf(jit_path, PATH_MAX - 1, "%s/.debug/jit", base);
++ ret = snprintf(jit_path, PATH_MAX, "%s/.debug/jit", base);
++ if (ret >= PATH_MAX) {
++ warnx("jvmti: cannot generate jit cache dir because"
++ " %s/.debug/jit is too long, please check the cwd,"
++ " JITDUMPDIR, and HOME variables", base);
++ return -1;
++ }
+ ret = mkdir(jit_path, 0755);
+ if (ret == -1) {
+ if (errno != EEXIST) {
+- warn("cannot create jit cache dir %s", jit_path);
++ warn("jvmti: cannot create jit cache dir %s", jit_path);
+ return -1;
+ }
+ }
+
+- snprintf(jit_path, PATH_MAX - 1, "%s/.debug/jit/%s.XXXXXXXX", base, str);
+-
++ ret = snprintf(jit_path, PATH_MAX, "%s/.debug/jit/%s.XXXXXXXX", base, str);
++ if (ret >= PATH_MAX) {
++ warnx("jvmti: cannot generate jit cache dir because"
++ " %s/.debug/jit/%s.XXXXXXXX is too long, please check"
++ " the cwd, JITDUMPDIR, and HOME variables",
++ base, str);
++ return -1;
++ }
+ p = mkdtemp(jit_path);
+ if (p != jit_path) {
+- warn("cannot create jit cache dir %s", jit_path);
++ warn("jvmti: cannot create jit cache dir %s", jit_path);
+ return -1;
+ }
+
+@@ -228,7 +245,7 @@ void *jvmti_open(void)
+ {
+ char dump_path[PATH_MAX];
+ struct jitheader header;
+- int fd;
++ int fd, ret;
+ FILE *fp;
+
+ init_arch_timestamp();
+@@ -245,12 +262,22 @@ void *jvmti_open(void)
+
+ memset(&header, 0, sizeof(header));
+
+- debug_cache_init();
++ /*
++ * jitdump file dir
++ */
++ if (create_jit_cache_dir() < 0)
++ return NULL;
+
+ /*
+ * jitdump file name
+ */
+- scnprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid());
++ ret = snprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid());
++ if (ret >= PATH_MAX) {
++ warnx("jvmti: cannot generate jitdump file full path because"
++ " %s/jit-%i.dump is too long, please check the cwd,"
++ " JITDUMPDIR, and HOME variables", jit_path, getpid());
++ return NULL;
++ }
+
+ fd = open(dump_path, O_CREAT|O_TRUNC|O_RDWR, 0666);
+ if (fd == -1)
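The jvmti changes replace unchecked snprintf(..., PATH_MAX - 1, ...) calls with truncation checks. The pattern they rely on is standard C: snprintf() returns the length the output would have had, so a return value of bufsize or more means the result is truncated and must not be used. A small self-contained sketch with a hypothetical build_jit_path() helper:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#ifndef PATH_MAX
#define PATH_MAX 4096
#endif

/* A truncated "$BASE/.debug/jit" could silently create or open the
 * wrong directory, so refuse to proceed instead. */
static int build_jit_path(char *out, size_t outsz, const char *base)
{
	int ret = snprintf(out, outsz, "%s/.debug/jit", base);

	if (ret < 0 || (size_t)ret >= outsz) {
		fprintf(stderr, "base directory '%s' is too long\n", base);
		return -1;
	}
	return 0;
}

int main(void)
{
	char path[PATH_MAX];
	const char *base = getenv("HOME");

	if (base && build_jit_path(path, sizeof(path), base) == 0)
		printf("jit cache dir would be: %s\n", path);
	return 0;
}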
+diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
+index fcc8984bc329..acad8ba06d77 100644
+--- a/tools/perf/tests/code-reading.c
++++ b/tools/perf/tests/code-reading.c
+@@ -527,6 +527,7 @@ static int do_test_code_reading(bool try_kcore)
+ pid = getpid();
+
+ machine = machine__new_host();
++ machine->env = &perf_env;
+
+ ret = machine__create_kernel_maps(machine);
+ if (ret < 0) {
+diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
+index 6276b340f893..b492cb974aa0 100644
+--- a/tools/perf/util/env.c
++++ b/tools/perf/util/env.c
+@@ -3,6 +3,7 @@
+ #include "env.h"
+ #include "util.h"
+ #include <errno.h>
++#include <sys/utsname.h>
+
+ struct perf_env perf_env;
+
+@@ -87,6 +88,37 @@ int perf_env__read_cpu_topology_map(struct perf_env *env)
+ return 0;
+ }
+
++static int perf_env__read_arch(struct perf_env *env)
++{
++ struct utsname uts;
++
++ if (env->arch)
++ return 0;
++
++ if (!uname(&uts))
++ env->arch = strdup(uts.machine);
++
++ return env->arch ? 0 : -ENOMEM;
++}
++
++static int perf_env__read_nr_cpus_avail(struct perf_env *env)
++{
++ if (env->nr_cpus_avail == 0)
++ env->nr_cpus_avail = cpu__max_present_cpu();
++
++ return env->nr_cpus_avail ? 0 : -ENOENT;
++}
++
++const char *perf_env__raw_arch(struct perf_env *env)
++{
++ return env && !perf_env__read_arch(env) ? env->arch : "unknown";
++}
++
++int perf_env__nr_cpus_avail(struct perf_env *env)
++{
++ return env && !perf_env__read_nr_cpus_avail(env) ? env->nr_cpus_avail : 0;
++}
++
+ void cpu_cache_level__free(struct cpu_cache_level *cache)
+ {
+ free(cache->type);
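perf_env__raw_arch() falls back to the unnormalized machine string from uname(2) when nothing better is recorded, which is why machine__is(machine, "x86_64") compares the literal string and why the raid6 test Makefile hunk earlier keys on "aarch64" rather than "arm64". A quick standalone check of what that raw string looks like on the running host:

#include <stdio.h>
#include <string.h>
#include <sys/utsname.h>

int main(void)
{
	struct utsname uts;

	if (uname(&uts) != 0) {
		perror("uname");
		return 1;
	}
	/* e.g. "x86_64" on Intel/AMD hosts, "aarch64" on ARM64 hosts */
	printf("raw arch:  %s\n", uts.machine);
	printf("is x86_64: %s\n", strcmp(uts.machine, "x86_64") ? "no" : "yes");
	return 0;
}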
+diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
+index 1eb35b190b34..9aace8452751 100644
+--- a/tools/perf/util/env.h
++++ b/tools/perf/util/env.h
+@@ -65,4 +65,8 @@ int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[]);
+ int perf_env__read_cpu_topology_map(struct perf_env *env);
+
+ void cpu_cache_level__free(struct cpu_cache_level *cache);
++
++const char *perf_env__raw_arch(struct perf_env *env);
++int perf_env__nr_cpus_avail(struct perf_env *env);
++
+ #endif /* __PERF_ENV_H */
+diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
+index bd5d5b5e2218..968fd0454e6b 100644
+--- a/tools/perf/util/machine.c
++++ b/tools/perf/util/machine.c
+@@ -818,6 +818,102 @@ static int machine__get_running_kernel_start(struct machine *machine,
+ return 0;
+ }
+
++/* Kernel-space maps for symbols that are outside the main kernel map and module maps */
++struct extra_kernel_map {
++ u64 start;
++ u64 end;
++ u64 pgoff;
++};
++
++static int machine__create_extra_kernel_map(struct machine *machine,
++ struct dso *kernel,
++ struct extra_kernel_map *xm)
++{
++ struct kmap *kmap;
++ struct map *map;
++
++ map = map__new2(xm->start, kernel, MAP__FUNCTION);
++ if (!map)
++ return -1;
++
++ map->end = xm->end;
++ map->pgoff = xm->pgoff;
++
++ kmap = map__kmap(map);
++
++ kmap->kmaps = &machine->kmaps;
++
++ map_groups__insert(&machine->kmaps, map);
++
++ pr_debug2("Added extra kernel map %" PRIx64 "-%" PRIx64 "\n",
++ map->start, map->end);
++
++ map__put(map);
++
++ return 0;
++}
++
++static u64 find_entry_trampoline(struct dso *dso)
++{
++ /* Duplicates are removed so lookup all aliases */
++ const char *syms[] = {
++ "_entry_trampoline",
++ "__entry_trampoline_start",
++ "entry_SYSCALL_64_trampoline",
++ };
++ struct symbol *sym = dso__first_symbol(dso, MAP__FUNCTION);
++ unsigned int i;
++
++ for (; sym; sym = dso__next_symbol(sym)) {
++ if (sym->binding != STB_GLOBAL)
++ continue;
++ for (i = 0; i < ARRAY_SIZE(syms); i++) {
++ if (!strcmp(sym->name, syms[i]))
++ return sym->start;
++ }
++ }
++
++ return 0;
++}
++
++/*
++ * These values can be used for kernels that do not have symbols for the entry
++ * trampolines in kallsyms.
++ */
++#define X86_64_CPU_ENTRY_AREA_PER_CPU 0xfffffe0000000000ULL
++#define X86_64_CPU_ENTRY_AREA_SIZE 0x2c000
++#define X86_64_ENTRY_TRAMPOLINE 0x6000
++
++/* Map x86_64 PTI entry trampolines */
++int machine__map_x86_64_entry_trampolines(struct machine *machine,
++ struct dso *kernel)
++{
++ u64 pgoff = find_entry_trampoline(kernel);
++ int nr_cpus_avail, cpu;
++
++ if (!pgoff)
++ return 0;
++
++ nr_cpus_avail = machine__nr_cpus_avail(machine);
++
++ /* Add a 1 page map for each CPU's entry trampoline */
++ for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
++ u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
++ cpu * X86_64_CPU_ENTRY_AREA_SIZE +
++ X86_64_ENTRY_TRAMPOLINE;
++ struct extra_kernel_map xm = {
++ .start = va,
++ .end = va + page_size,
++ .pgoff = pgoff,
++ };
++
++ if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0)
++ return -1;
++ }
++
++ return 0;
++}
++
+ int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
+ {
+ int type;
+@@ -2238,6 +2334,20 @@ int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
+ return 0;
+ }
+
++/*
++ * Compares the raw arch string. N.B. see instead perf_env__arch() if a
++ * normalized arch is needed.
++ */
++bool machine__is(struct machine *machine, const char *arch)
++{
++ return machine && !strcmp(perf_env__raw_arch(machine->env), arch);
++}
++
++int machine__nr_cpus_avail(struct machine *machine)
++{
++ return machine ? perf_env__nr_cpus_avail(machine->env) : 0;
++}
++
+ int machine__get_kernel_start(struct machine *machine)
+ {
+ struct map *map = machine__kernel_map(machine);
+@@ -2254,7 +2364,12 @@ int machine__get_kernel_start(struct machine *machine)
+ machine->kernel_start = 1ULL << 63;
+ if (map) {
+ err = map__load(map);
+- if (!err)
++ /*
++ * On x86_64, PTI entry trampolines are less than the
++ * start of kernel text, but still above 2^63. So leave
++ * kernel_start = 1ULL << 63 for x86_64.
++ */
++ if (!err && !machine__is(machine, "x86_64"))
+ machine->kernel_start = map->start;
+ }
+ return err;
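machine__map_x86_64_entry_trampolines() synthesizes one page-sized map per CPU from fixed constants when kallsyms lacks trampoline symbols. The standalone sketch below just reproduces that address arithmetic with the same constants; note the resulting addresses sit above 2^63, which is why the machine__get_kernel_start() hunk just above leaves kernel_start at 1ULL << 63 on x86_64.

#include <inttypes.h>
#include <stdio.h>

/* Same constants the hunk hard-codes: each CPU's entry trampoline lives
 * at a fixed offset inside its cpu_entry_area slot. */
#define X86_64_CPU_ENTRY_AREA_PER_CPU	0xfffffe0000000000ULL
#define X86_64_CPU_ENTRY_AREA_SIZE	0x2c000ULL
#define X86_64_ENTRY_TRAMPOLINE		0x6000ULL
#define PAGE_SIZE_4K			0x1000ULL	/* assumes 4K pages */

int main(void)
{
	for (int cpu = 0; cpu < 4; cpu++) {
		uint64_t va = X86_64_CPU_ENTRY_AREA_PER_CPU +
			      (uint64_t)cpu * X86_64_CPU_ENTRY_AREA_SIZE +
			      X86_64_ENTRY_TRAMPOLINE;
		printf("cpu%d: trampoline map %#" PRIx64 "-%#" PRIx64 "\n",
		       cpu, va, va + PAGE_SIZE_4K);
	}
	return 0;
}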
+diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
+index d551aa80a59b..13041b036a5b 100644
+--- a/tools/perf/util/machine.h
++++ b/tools/perf/util/machine.h
+@@ -169,6 +169,9 @@ static inline bool machine__is_host(struct machine *machine)
+ return machine ? machine->pid == HOST_KERNEL_ID : false;
+ }
+
++bool machine__is(struct machine *machine, const char *arch);
++int machine__nr_cpus_avail(struct machine *machine);
++
+ struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid);
+ struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid);
+
+@@ -263,4 +266,7 @@ int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
+ */
+ char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp);
+
++int machine__map_x86_64_entry_trampolines(struct machine *machine,
++ struct dso *kernel);
++
+ #endif /* __PERF_MACHINE_H */
+diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
+index 2de770511e70..8ad4296de98b 100644
+--- a/tools/perf/util/symbol-elf.c
++++ b/tools/perf/util/symbol-elf.c
+@@ -338,7 +338,17 @@ int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map *
+ plt_entry_size = 16;
+ break;
+
+- default: /* FIXME: s390/alpha/mips/parisc/poperpc/sh/sparc/xtensa need to be checked */
++ case EM_SPARC:
++ plt_header_size = 48;
++ plt_entry_size = 12;
++ break;
++
++ case EM_SPARCV9:
++ plt_header_size = 128;
++ plt_entry_size = 32;
++ break;
++
++	default: /* FIXME: s390/alpha/mips/parisc/powerpc/sh/xtensa need to be checked */
+ plt_header_size = shdr_plt.sh_entsize;
+ plt_entry_size = shdr_plt.sh_entsize;
+ break;
+diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
+index ec40e47aa198..3936f69f385c 100644
+--- a/tools/perf/util/symbol.c
++++ b/tools/perf/util/symbol.c
+@@ -1513,20 +1513,22 @@ int dso__load(struct dso *dso, struct map *map)
+ goto out;
+ }
+
++ if (map->groups && map->groups->machine)
++ machine = map->groups->machine;
++ else
++ machine = NULL;
++
+ if (dso->kernel) {
+ if (dso->kernel == DSO_TYPE_KERNEL)
+ ret = dso__load_kernel_sym(dso, map);
+ else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
+ ret = dso__load_guest_kernel_sym(dso, map);
+
++ if (machine__is(machine, "x86_64"))
++ machine__map_x86_64_entry_trampolines(machine, dso);
+ goto out;
+ }
+
+- if (map->groups && map->groups->machine)
+- machine = map->groups->machine;
+- else
+- machine = NULL;
+-
+ dso->adjust_symbols = 0;
+
+ if (perfmap) {