summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAnthony G. Basile <blueness@gentoo.org>2012-07-29 20:14:42 -0400
committerAnthony G. Basile <blueness@gentoo.org>2012-07-29 20:14:42 -0400
commit186635ac6f72fb55b072ad23c93c102dd65e50d6 (patch)
tree66877e04242faef02a6921f570f40307b7f5d8a0
parentGrsec/PaX: 2.9.1-{2.6.32.59,3.2.23,3.4.6}-201207242237 (diff)
downloadhardened-patchset-186635ac6f72fb55b072ad23c93c102dd65e50d6.tar.gz
hardened-patchset-186635ac6f72fb55b072ad23c93c102dd65e50d6.tar.bz2
hardened-patchset-186635ac6f72fb55b072ad23c93c102dd65e50d6.zip
Grsec/PaX: 2.9.1-{2.6.32.59,3.2.24,3.4.6}-20120728194620120728
-rw-r--r--2.6.32/0000_README2
-rw-r--r--2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201207281944.patch (renamed from 2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201207242236.patch)1612
-rw-r--r--2.6.32/4450_grsec-kconfig-default-gids.patch2
-rw-r--r--3.2.24/0000_README (renamed from 3.2.23/0000_README)6
-rw-r--r--3.2.24/1021_linux-3.2.22.patch (renamed from 3.2.23/1021_linux-3.2.22.patch)0
-rw-r--r--3.2.24/1022_linux-3.2.23.patch (renamed from 3.2.23/1022_linux-3.2.23.patch)0
-rw-r--r--3.2.24/1023_linux-3.2.24.patch4684
-rw-r--r--3.2.24/4420_grsecurity-2.9.1-3.2.24-201207281946.patch (renamed from 3.2.23/4420_grsecurity-2.9.1-3.2.23-201207242236.patch)1872
-rw-r--r--3.2.24/4430_grsec-remove-localversion-grsec.patch (renamed from 3.2.23/4430_grsec-remove-localversion-grsec.patch)0
-rw-r--r--3.2.24/4435_grsec-mute-warnings.patch (renamed from 3.2.23/4435_grsec-mute-warnings.patch)0
-rw-r--r--3.2.24/4440_grsec-remove-protected-paths.patch (renamed from 3.2.23/4440_grsec-remove-protected-paths.patch)0
-rw-r--r--3.2.24/4450_grsec-kconfig-default-gids.patch (renamed from 3.2.23/4450_grsec-kconfig-default-gids.patch)0
-rw-r--r--3.2.24/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 3.2.23/4465_selinux-avc_audit-log-curr_ip.patch)0
-rw-r--r--3.2.24/4470_disable-compat_vdso.patch (renamed from 3.2.23/4470_disable-compat_vdso.patch)0
-rw-r--r--3.4.6/0000_README2
-rw-r--r--3.4.6/4420_grsecurity-2.9.1-3.4.6-201207281946.patch (renamed from 3.4.6/4420_grsecurity-2.9.1-3.4.6-201207242237.patch)294
16 files changed, 7212 insertions, 1262 deletions
diff --git a/2.6.32/0000_README b/2.6.32/0000_README
index 9b8dd9a..d4f6601 100644
--- a/2.6.32/0000_README
+++ b/2.6.32/0000_README
@@ -30,7 +30,7 @@ Patch: 1058_linux-2.6.32.59.patch
From: http://www.kernel.org
Desc: Linux 2.6.32.59
-Patch: 4420_grsecurity-2.9.1-2.6.32.59-201207242236.patch
+Patch: 4420_grsecurity-2.9.1-2.6.32.59-201207281944.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201207242236.patch b/2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201207281944.patch
index adbc4d5..227df5e 100644
--- a/2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201207242236.patch
+++ b/2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201207281944.patch
@@ -1,5 +1,5 @@
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
-index e1efc40..e7a5667 100644
+index e1efc40..3569a2f 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -1,15 +1,20 @@
@@ -23,7 +23,7 @@ index e1efc40..e7a5667 100644
*.grep
*.grp
*.gz
-@@ -38,8 +43,10 @@
+@@ -38,22 +43,30 @@
*.tab.h
*.tex
*.ver
@@ -34,7 +34,11 @@ index e1efc40..e7a5667 100644
*_vga16.c
*~
*.9
-@@ -49,11 +56,16 @@
+ *.9.gz
+-.*
++.[^g]*
++.gen*
+ .mm
53c700_d.h
CVS
ChangeSet
@@ -51,7 +55,7 @@ index e1efc40..e7a5667 100644
SCCS
System.map*
TAGS
-@@ -62,6 +74,7 @@ aic7*reg_print.c*
+@@ -62,6 +75,7 @@ aic7*reg_print.c*
aic7*seq.h*
aicasm
aicdb.h*
@@ -59,7 +63,7 @@ index e1efc40..e7a5667 100644
asm-offsets.h
asm_offsets.h
autoconf.h*
-@@ -76,7 +89,11 @@ btfixupprep
+@@ -76,7 +90,11 @@ btfixupprep
build
bvmlinux
bzImage*
@@ -71,7 +75,7 @@ index e1efc40..e7a5667 100644
comp*.log
compile.h*
conf
-@@ -84,6 +101,8 @@ config
+@@ -84,6 +102,8 @@ config
config-*
config_data.h*
config_data.gz*
@@ -80,7 +84,7 @@ index e1efc40..e7a5667 100644
conmakehash
consolemap_deftbl.c*
cpustr.h
-@@ -97,19 +116,23 @@ elfconfig.h*
+@@ -97,19 +117,23 @@ elfconfig.h*
fixdep
fore200e_mkfirm
fore200e_pca_fw.c*
@@ -105,7 +109,7 @@ index e1efc40..e7a5667 100644
keywords.c
ksym.c*
ksym.h*
-@@ -117,6 +140,7 @@ kxgettext
+@@ -117,6 +141,7 @@ kxgettext
lkc_defs.h
lex.c
lex.*.c
@@ -113,7 +117,7 @@ index e1efc40..e7a5667 100644
logo_*.c
logo_*_clut224.c
logo_*_mono.c
-@@ -127,13 +151,16 @@ machtypes.h
+@@ -127,13 +152,16 @@ machtypes.h
map
maui_boot.h
mconf
@@ -130,7 +134,7 @@ index e1efc40..e7a5667 100644
mktables
mktree
modpost
-@@ -149,6 +176,7 @@ patches*
+@@ -149,6 +177,7 @@ patches*
pca200e.bin
pca200e_ecd.bin2
piggy.gz
@@ -138,7 +142,7 @@ index e1efc40..e7a5667 100644
piggyback
pnmtologo
ppc_defs.h*
-@@ -157,12 +185,16 @@ qconf
+@@ -157,12 +186,16 @@ qconf
raid6altivec*.c
raid6int*.c
raid6tables.c
@@ -155,7 +159,7 @@ index e1efc40..e7a5667 100644
sm_tbl*
split-include
syscalltab.h
-@@ -171,6 +203,7 @@ tftpboot.img
+@@ -171,6 +204,7 @@ tftpboot.img
timeconst.h
times.h*
trix_boot.h
@@ -163,7 +167,7 @@ index e1efc40..e7a5667 100644
utsrelease.h*
vdso-syms.lds
vdso.lds
-@@ -186,14 +219,20 @@ version.h*
+@@ -186,14 +220,20 @@ version.h*
vmlinux
vmlinux-*
vmlinux.aout
@@ -185,10 +189,23 @@ index e1efc40..e7a5667 100644
zconf.hash.c
+zoffset.h
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
-index c840e7d..ad11cac 100644
+index c840e7d..30f0efe 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
-@@ -1725,6 +1725,11 @@ and is between 256 and 4096 characters. It is defined in the file
+@@ -856,6 +856,12 @@ and is between 256 and 4096 characters. It is defined in the file
+ If specified, z/VM IUCV HVC accepts connections
+ from listed z/VM user IDs only.
+
++ keep_bootcon [KNL]
++ Do not unregister boot console at start. This is only
++ useful for debugging when something happens in the window
++ between unregistering the boot console and initializing
++ the real console.
++
+ i2c_bus= [HW] Override the default board specific I2C bus speed
+ or register an additional I2C bus that is not
+ registered from board initialization code.
+@@ -1725,6 +1731,11 @@ and is between 256 and 4096 characters. It is defined in the file
noresidual [PPC] Don't use residual data on PReP machines.
@@ -200,7 +217,7 @@ index c840e7d..ad11cac 100644
noresume [SWSUSP] Disables resume and restores original swap
space.
-@@ -1837,6 +1842,13 @@ and is between 256 and 4096 characters. It is defined in the file
+@@ -1837,6 +1848,13 @@ and is between 256 and 4096 characters. It is defined in the file
the specified number of seconds. This is to be used if
your oopses keep scrolling off the screen.
@@ -234,7 +251,7 @@ index 613da5d..4fe3eda 100644
M: Liam Girdwood <lrg@slimlogic.co.uk>
M: Mark Brown <broonie@opensource.wolfsonmicro.com>
diff --git a/Makefile b/Makefile
-index 3a9a721..b81a4d5 100644
+index 3a9a721..20e2d81 100644
--- a/Makefile
+++ b/Makefile
@@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -269,7 +286,7 @@ index 3a9a721..b81a4d5 100644
include/linux/version.h headers_% \
kernelrelease kernelversion
-@@ -526,6 +527,56 @@ else
+@@ -526,6 +527,60 @@ else
KBUILD_CFLAGS += -O2
endif
@@ -302,10 +319,14 @@ index 3a9a721..b81a4d5 100644
+ifdef CONFIG_PAX_SIZE_OVERFLOW
+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
+endif
++ifdef CONFIG_PAX_LATENT_ENTROPY
++LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
++endif
+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
-+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS) $(SIZE_OVERFLOW_PLUGIN_CFLAGS)
++GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
++GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS)
+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
-+export PLUGINCC CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN SIZE_OVERFLOW_PLUGIN
++export PLUGINCC CONSTIFY_PLUGIN
+ifeq ($(KBUILD_EXTMOD),)
+gcc-plugins:
+ $(Q)$(MAKE) $(build)=tools/gcc
@@ -326,7 +347,7 @@ index 3a9a721..b81a4d5 100644
include $(srctree)/arch/$(SRCARCH)/Makefile
ifneq ($(CONFIG_FRAME_WARN),0)
-@@ -647,7 +698,7 @@ export mod_strip_cmd
+@@ -647,7 +702,7 @@ export mod_strip_cmd
ifeq ($(KBUILD_EXTMOD),)
@@ -335,7 +356,7 @@ index 3a9a721..b81a4d5 100644
vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
$(core-y) $(core-m) $(drivers-y) $(drivers-m) \
-@@ -868,6 +919,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
+@@ -868,6 +923,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
# The actual objects are generated when descending,
# make sure no implicit rule kicks in
@@ -344,7 +365,7 @@ index 3a9a721..b81a4d5 100644
$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
# Handle descending into subdirectories listed in $(vmlinux-dirs)
-@@ -877,7 +930,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
+@@ -877,7 +934,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
# Error messages still appears in the original language
PHONY += $(vmlinux-dirs)
@@ -353,7 +374,7 @@ index 3a9a721..b81a4d5 100644
$(Q)$(MAKE) $(build)=$@
# Build the kernel release string
-@@ -986,6 +1039,7 @@ prepare0: archprepare FORCE
+@@ -986,6 +1043,7 @@ prepare0: archprepare FORCE
$(Q)$(MAKE) $(build)=. missing-syscalls
# All the preparing..
@@ -361,7 +382,7 @@ index 3a9a721..b81a4d5 100644
prepare: prepare0
# The asm symlink changes when $(ARCH) changes.
-@@ -1127,6 +1181,8 @@ all: modules
+@@ -1127,6 +1185,8 @@ all: modules
# using awk while concatenating to the final file.
PHONY += modules
@@ -370,7 +391,7 @@ index 3a9a721..b81a4d5 100644
modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
$(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
@$(kecho) ' Building modules, stage 2.';
-@@ -1136,7 +1192,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
+@@ -1136,7 +1196,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
# Target to prepare building external modules
PHONY += modules_prepare
@@ -379,7 +400,7 @@ index 3a9a721..b81a4d5 100644
# Target to install modules
PHONY += modules_install
-@@ -1199,9 +1255,9 @@ CLEAN_FILES += vmlinux System.map \
+@@ -1199,9 +1259,9 @@ CLEAN_FILES += vmlinux System.map \
MRPROPER_DIRS += include/config include2 usr/include include/generated
MRPROPER_FILES += .config .config.old include/asm .version .old_version \
include/linux/autoconf.h include/linux/version.h \
@@ -391,7 +412,7 @@ index 3a9a721..b81a4d5 100644
# clean - Delete most, but leave enough to build external modules
#
-@@ -1245,7 +1301,7 @@ distclean: mrproper
+@@ -1245,7 +1305,7 @@ distclean: mrproper
@find $(srctree) $(RCS_FIND_IGNORE) \
\( -name '*.orig' -o -name '*.rej' -o -name '*~' \
-o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
@@ -400,7 +421,7 @@ index 3a9a721..b81a4d5 100644
-o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
-type f -print | xargs rm -f
-@@ -1292,6 +1348,7 @@ help:
+@@ -1292,6 +1352,7 @@ help:
@echo ' modules_prepare - Set up for building external modules'
@echo ' tags/TAGS - Generate tags file for editors'
@echo ' cscope - Generate cscope index'
@@ -408,7 +429,7 @@ index 3a9a721..b81a4d5 100644
@echo ' kernelrelease - Output the release version string'
@echo ' kernelversion - Output the version stored in Makefile'
@echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
-@@ -1393,6 +1450,8 @@ PHONY += $(module-dirs) modules
+@@ -1393,6 +1454,8 @@ PHONY += $(module-dirs) modules
$(module-dirs): crmodverdir $(objtree)/Module.symvers
$(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
@@ -417,7 +438,7 @@ index 3a9a721..b81a4d5 100644
modules: $(module-dirs)
@$(kecho) ' Building modules, stage 2.';
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
-@@ -1448,7 +1507,7 @@ endif # KBUILD_EXTMOD
+@@ -1448,7 +1511,7 @@ endif # KBUILD_EXTMOD
quiet_cmd_tags = GEN $@
cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
@@ -426,7 +447,7 @@ index 3a9a721..b81a4d5 100644
$(call cmd,tags)
# Scripts to check various things for consistency
-@@ -1513,17 +1572,21 @@ else
+@@ -1513,17 +1576,21 @@ else
target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
endif
@@ -452,7 +473,7 @@ index 3a9a721..b81a4d5 100644
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
%.symtypes: %.c prepare scripts FORCE
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-@@ -1533,11 +1596,15 @@ endif
+@@ -1533,11 +1600,15 @@ endif
$(cmd_crmodverdir)
$(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
$(build)=$(build-dir)
@@ -7481,7 +7502,7 @@ index 79836a7..62f47a2 100644
obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
obj-y += fault_$(BITS).o
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
-index b99f81c..3453e93 100644
+index b99f81c..16c0132 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -21,6 +21,9 @@
@@ -7494,7 +7515,7 @@ index b99f81c..3453e93 100644
#include <asm/system.h>
#include <asm/page.h>
-@@ -167,6 +170,267 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
+@@ -167,6 +170,276 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
return safe_compute_effective_address(regs, insn);
}
@@ -7584,40 +7605,49 @@ index b99f81c..3453e93 100644
+ }
+ } while (0);
+
-+ { /* PaX: patched PLT emulation #2 */
++ do { /* PaX: patched PLT emulation #2 */
+ unsigned int ba;
+
+ err = get_user(ba, (unsigned int *)regs->pc);
+
-+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
++ if (err)
++ break;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
+ unsigned int addr;
+
-+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++ else
++ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
+ regs->pc = addr;
+ regs->npc = addr+4;
+ return 2;
+ }
-+ }
++ } while (0);
+
+ do { /* PaX: patched PLT emulation #3 */
-+ unsigned int sethi, jmpl, nop;
++ unsigned int sethi, bajmpl, nop;
+
+ err = get_user(sethi, (unsigned int *)regs->pc);
-+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
++ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
+
+ if (err)
+ break;
+
+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
-+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
++ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
+ nop == 0x01000000U)
+ {
+ unsigned int addr;
+
+ addr = (sethi & 0x003FFFFFU) << 10;
+ regs->u_regs[UREG_G1] = addr;
-+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ else
++ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
+ regs->pc = addr;
+ regs->npc = addr+4;
+ return 2;
@@ -7762,7 +7792,7 @@ index b99f81c..3453e93 100644
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
unsigned long address)
{
-@@ -231,6 +495,24 @@ good_area:
+@@ -231,6 +504,24 @@ good_area:
if(!(vma->vm_flags & VM_WRITE))
goto bad_area;
} else {
@@ -7788,7 +7818,7 @@ index b99f81c..3453e93 100644
if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
-index 43b0da9..a0b78f9 100644
+index 43b0da9..f9f9985 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -20,6 +20,9 @@
@@ -7810,7 +7840,7 @@ index 43b0da9..a0b78f9 100644
printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
dump_stack();
unhandled_fault(regs->tpc, current, regs);
-@@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_address(struct pt_regs *regs,
+@@ -249,6 +252,465 @@ static void noinline bogus_32bit_fault_address(struct pt_regs *regs,
show_regs(regs);
}
@@ -7904,15 +7934,21 @@ index 43b0da9..a0b78f9 100644
+ }
+ } while (0);
+
-+ { /* PaX: patched PLT emulation #2 */
++ do { /* PaX: patched PLT emulation #2 */
+ unsigned int ba;
+
+ err = get_user(ba, (unsigned int *)regs->tpc);
+
-+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
++ if (err)
++ break;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
+ unsigned long addr;
+
-+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++ else
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
+
+ if (test_thread_flag(TIF_32BIT))
+ addr &= 0xFFFFFFFFUL;
@@ -7921,27 +7957,30 @@ index 43b0da9..a0b78f9 100644
+ regs->tnpc = addr+4;
+ return 2;
+ }
-+ }
++ } while (0);
+
+ do { /* PaX: patched PLT emulation #3 */
-+ unsigned int sethi, jmpl, nop;
++ unsigned int sethi, bajmpl, nop;
+
+ err = get_user(sethi, (unsigned int *)regs->tpc);
-+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
++ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
+
+ if (err)
+ break;
+
+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
-+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
++ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
+ nop == 0x01000000U)
+ {
+ unsigned long addr;
+
+ addr = (sethi & 0x003FFFFFU) << 10;
+ regs->u_regs[UREG_G1] = addr;
-+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
++ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++ else
++ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
+
+ if (test_thread_flag(TIF_32BIT))
+ addr &= 0xFFFFFFFFUL;
@@ -8267,7 +8306,7 @@ index 43b0da9..a0b78f9 100644
asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
{
struct mm_struct *mm = current->mm;
-@@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
+@@ -315,6 +777,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
if (!vma)
goto bad_area;
@@ -10177,7 +10216,7 @@ index 0000000..0d9ec77
+
+#endif /* ASM_X86_ARCHRANDOM_H */
diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
-index dc5a667..939040c 100644
+index dc5a667..7a2470f 100644
--- a/arch/x86/include/asm/atomic_32.h
+++ b/arch/x86/include/asm/atomic_32.h
@@ -25,6 +25,17 @@ static inline int atomic_read(const atomic_t *v)
@@ -10506,7 +10545,7 @@ index dc5a667..939040c 100644
/**
* atomic_add_unless - add unless the number is already a given value
* @v: pointer of type atomic_t
-@@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *v, int new)
+@@ -227,32 +439,73 @@ static inline int atomic_xchg(atomic_t *v, int new)
*/
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
@@ -10550,7 +10589,47 @@ index dc5a667..939040c 100644
#define atomic_dec_return(v) (atomic_sub_return(1, v))
/* These are x86-specific, used by some header files */
-@@ -266,9 +495,18 @@ typedef struct {
+-#define atomic_clear_mask(mask, addr) \
+- asm volatile(LOCK_PREFIX "andl %0,%1" \
+- : : "r" (~(mask)), "m" (*(addr)) : "memory")
++static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
++{
++ asm volatile(LOCK_PREFIX "andl %1,%0"
++ : "+m" (v->counter)
++ : "r" (~(mask))
++ : "memory");
++}
+
+-#define atomic_set_mask(mask, addr) \
+- asm volatile(LOCK_PREFIX "orl %0,%1" \
+- : : "r" (mask), "m" (*(addr)) : "memory")
++static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "andl %1,%0"
++ : "+m" (v->counter)
++ : "r" (~(mask))
++ : "memory");
++}
++
++static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
++{
++ asm volatile(LOCK_PREFIX "orl %1,%0"
++ : "+m" (v->counter)
++ : "r" (mask)
++ : "memory");
++}
++
++static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "orl %1,%0"
++ : "+m" (v->counter)
++ : "r" (mask)
++ : "memory");
++}
+
+ /* Atomic operations are already serializing on x86 */
+ #define smp_mb__before_atomic_dec() barrier()
+@@ -266,9 +519,18 @@ typedef struct {
u64 __aligned(8) counter;
} atomic64_t;
@@ -10569,7 +10648,7 @@ index dc5a667..939040c 100644
/**
* atomic64_xchg - xchg atomic64 variable
-@@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
+@@ -279,6 +541,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
* the old value.
*/
extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
@@ -10577,7 +10656,7 @@ index dc5a667..939040c 100644
/**
* atomic64_set - set atomic64 variable
-@@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
+@@ -290,6 +553,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
extern void atomic64_set(atomic64_t *ptr, u64 new_val);
/**
@@ -10593,7 +10672,7 @@ index dc5a667..939040c 100644
* atomic64_read - read atomic64 variable
* @ptr: pointer to type atomic64_t
*
-@@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64_t *ptr)
+@@ -317,7 +589,33 @@ static inline u64 atomic64_read(atomic64_t *ptr)
return res;
}
@@ -10628,7 +10707,7 @@ index dc5a667..939040c 100644
/**
* atomic64_add_return - add and return
-@@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
+@@ -332,8 +630,11 @@ extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
* Other variants with different arithmetic operators:
*/
extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
@@ -10640,7 +10719,7 @@ index dc5a667..939040c 100644
/**
* atomic64_add - add integer to atomic64 variable
-@@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_t *ptr);
+@@ -345,6 +646,15 @@ extern u64 atomic64_dec_return(atomic64_t *ptr);
extern void atomic64_add(u64 delta, atomic64_t *ptr);
/**
@@ -10656,7 +10735,7 @@ index dc5a667..939040c 100644
* atomic64_sub - subtract the atomic64 variable
* @delta: integer value to subtract
* @ptr: pointer to type atomic64_t
-@@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atomic64_t *ptr);
+@@ -354,6 +664,15 @@ extern void atomic64_add(u64 delta, atomic64_t *ptr);
extern void atomic64_sub(u64 delta, atomic64_t *ptr);
/**
@@ -10672,7 +10751,7 @@ index dc5a667..939040c 100644
* atomic64_sub_and_test - subtract value from variable and test result
* @delta: integer value to subtract
* @ptr: pointer to type atomic64_t
-@@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
+@@ -373,6 +692,14 @@ extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
extern void atomic64_inc(atomic64_t *ptr);
/**
@@ -10687,7 +10766,7 @@ index dc5a667..939040c 100644
* atomic64_dec - decrement atomic64 variable
* @ptr: pointer to type atomic64_t
*
-@@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr);
+@@ -381,6 +708,14 @@ extern void atomic64_inc(atomic64_t *ptr);
extern void atomic64_dec(atomic64_t *ptr);
/**
@@ -10703,7 +10782,7 @@ index dc5a667..939040c 100644
* @ptr: pointer to type atomic64_t
*
diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h
-index d605dc2..fafd7bd 100644
+index d605dc2..72cb5cd 100644
--- a/arch/x86/include/asm/atomic_64.h
+++ b/arch/x86/include/asm/atomic_64.h
@@ -24,6 +24,17 @@ static inline int atomic_read(const atomic_t *v)
@@ -11370,6 +11449,51 @@ index d605dc2..fafd7bd 100644
}
/**
+@@ -466,14 +864,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
+ /* These are x86-specific, used by some header files */
+-#define atomic_clear_mask(mask, addr) \
+- asm volatile(LOCK_PREFIX "andl %0,%1" \
+- : : "r" (~(mask)), "m" (*(addr)) : "memory")
++static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
++{
++ asm volatile(LOCK_PREFIX "andl %1,%0"
++ : "+m" (v->counter)
++ : "r" (~(mask))
++ : "memory");
++}
+
+-#define atomic_set_mask(mask, addr) \
+- asm volatile(LOCK_PREFIX "orl %0,%1" \
+- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
+- : "memory")
++static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "andl %1,%0"
++ : "+m" (v->counter)
++ : "r" (~(mask))
++ : "memory");
++}
++
++static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
++{
++ asm volatile(LOCK_PREFIX "orl %1,%0"
++ : "+m" (v->counter)
++ : "r" (mask)
++ : "memory");
++}
++
++static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "orl %1,%0"
++ : "+m" (v->counter)
++ : "r" (mask)
++ : "memory");
++}
+
+ /* Atomic operations are already serializing on x86 */
+ #define smp_mb__before_atomic_dec() barrier()
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 02b47a6..d5c4b15 100644
--- a/arch/x86/include/asm/bitops.h
@@ -33762,7 +33886,7 @@ index 87c67b4..230527a 100644
.part_num = MBCS_PART_NUM,
.mfg_num = MBCS_MFG_NUM,
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
-index 1270f64..8495f49 100644
+index 1270f64..3b87405 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -18,6 +18,7 @@
@@ -33825,7 +33949,7 @@ index 1270f64..8495f49 100644
- if (copy_to_user(buf, ptr, sz)) {
+#ifdef CONFIG_PAX_USERCOPY
-+ temp = kmalloc(sz, GFP_KERNEL);
++ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
+ if (!temp) {
+ unxlate_dev_mem_ptr(p, ptr);
+ return -ENOMEM;
@@ -33878,7 +34002,7 @@ index 1270f64..8495f49 100644
- if (copy_to_user(buf, kbuf, sz))
+#ifdef CONFIG_PAX_USERCOPY
-+ temp = kmalloc(sz, GFP_KERNEL);
++ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
+ if (!temp)
+ return -ENOMEM;
+ memcpy(temp, kbuf, sz);
@@ -34062,7 +34186,7 @@ index 62f282e..e45c45c 100644
cdev_init(&ptmx_cdev, &ptmx_fops);
if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
diff --git a/drivers/char/random.c b/drivers/char/random.c
-index 3a19e2d..8eb80fc 100644
+index 3a19e2d..7d9aaad 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -125,20 +125,32 @@
@@ -34114,7 +34238,7 @@ index 3a19e2d..8eb80fc 100644
#ifdef CONFIG_GENERIC_HARDIRQS
# include <linux/irq.h>
-@@ -249,14 +262,21 @@
+@@ -249,14 +262,23 @@
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/irq.h>
@@ -34133,10 +34257,12 @@ index 3a19e2d..8eb80fc 100644
+#endif
#define SEC_XFER_SIZE 512
+#define EXTRACT_SIZE 10
++
++#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
/*
* The minimum number of bits of entropy before we wake up a read on
-@@ -292,10 +312,17 @@ static struct poolinfo {
+@@ -292,10 +314,17 @@ static struct poolinfo {
int poolwords;
int tap1, tap2, tap3, tap4, tap5;
} poolinfo_table[] = {
@@ -34154,7 +34280,7 @@ index 3a19e2d..8eb80fc 100644
#if 0
/* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
{ 2048, 1638, 1231, 819, 411, 1 },
-@@ -412,9 +439,11 @@ struct entropy_store {
+@@ -412,9 +441,11 @@ struct entropy_store {
/* read-write data: */
spinlock_t lock;
unsigned add_ptr;
@@ -34168,7 +34294,7 @@ index 3a19e2d..8eb80fc 100644
};
static __u32 input_pool_data[INPUT_POOL_WORDS];
-@@ -446,6 +475,10 @@ static struct entropy_store nonblocking_pool = {
+@@ -446,6 +477,10 @@ static struct entropy_store nonblocking_pool = {
.pool = nonblocking_pool_data
};
@@ -34179,7 +34305,7 @@ index 3a19e2d..8eb80fc 100644
/*
* This function adds bytes into the entropy "pool". It does not
* update the entropy estimate. The caller should call
-@@ -456,29 +489,24 @@ static struct entropy_store nonblocking_pool = {
+@@ -456,29 +491,24 @@ static struct entropy_store nonblocking_pool = {
* it's cheap to do so and helps slightly in the expected case where
* the entropy is concentrated in the low-order bits.
*/
@@ -34214,7 +34340,7 @@ index 3a19e2d..8eb80fc 100644
/* mix one byte at a time to simplify size handling and churn faster */
while (nbytes--) {
-@@ -505,19 +533,53 @@ static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
+@@ -505,19 +535,53 @@ static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
input_rotate += i ? 7 : 14;
}
@@ -34272,7 +34398,7 @@ index 3a19e2d..8eb80fc 100644
}
/*
-@@ -525,30 +587,34 @@ static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
+@@ -525,30 +589,34 @@ static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
*/
static void credit_entropy_bits(struct entropy_store *r, int nbits)
{
@@ -34314,7 +34440,7 @@ index 3a19e2d..8eb80fc 100644
}
/*********************************************************************
-@@ -601,6 +667,25 @@ static void set_timer_rand_state(unsigned int irq,
+@@ -601,6 +669,25 @@ static void set_timer_rand_state(unsigned int irq,
}
#endif
@@ -34340,7 +34466,7 @@ index 3a19e2d..8eb80fc 100644
static struct timer_rand_state input_timer_state;
/*
-@@ -631,7 +716,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
+@@ -631,7 +718,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
sample.jiffies = jiffies;
sample.cycles = get_cycles();
sample.num = num;
@@ -34349,7 +34475,7 @@ index 3a19e2d..8eb80fc 100644
/*
* Calculate number of bits of randomness we probably added.
-@@ -688,17 +773,48 @@ void add_input_randomness(unsigned int type, unsigned int code,
+@@ -688,17 +775,48 @@ void add_input_randomness(unsigned int type, unsigned int code,
}
EXPORT_SYMBOL_GPL(add_input_randomness);
@@ -34404,77 +34530,108 @@ index 3a19e2d..8eb80fc 100644
}
#ifdef CONFIG_BLOCK
-@@ -714,8 +830,6 @@ void add_disk_randomness(struct gendisk *disk)
+@@ -714,7 +832,16 @@ void add_disk_randomness(struct gendisk *disk)
}
#endif
-#define EXTRACT_SIZE 10
--
++#ifdef CONFIG_PAX_LATENT_ENTROPY
++u64 latent_entropy;
++
++__init void transfer_latent_entropy(void)
++{
++ mix_pool_bytes(&input_pool, &latent_entropy, sizeof(latent_entropy), NULL);
++ mix_pool_bytes(&nonblocking_pool, &latent_entropy, sizeof(latent_entropy), NULL);
++// printk(KERN_INFO "PAX: transferring latent entropy: %16llx\n", latent_entropy);
++}
++#endif
+
/*********************************************************************
*
- * Entropy extraction routines
-@@ -732,7 +846,11 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+@@ -732,7 +859,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
*/
static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
{
- __u32 tmp[OUTPUT_POOL_WORDS];
-+ union {
-+ __u32 tmp[OUTPUT_POOL_WORDS];
-+ long hwrand[4];
-+ } u;
-+ int i;
++ __u32 tmp[OUTPUT_POOL_WORDS];
if (r->pull && r->entropy_count < nbytes * 8 &&
r->entropy_count < r->poolinfo->POOLBITS) {
-@@ -743,17 +861,22 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
- /* pull at least as many as BYTES as wakeup BITS */
- bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
- /* but never more than the buffer size */
-- bytes = min_t(int, bytes, sizeof(tmp));
-+ bytes = min_t(int, bytes, sizeof(u.tmp));
-
- DEBUG_ENT("going to reseed %s with %d bits "
- "(%d of %d requested)\n",
- r->name, bytes * 8, nbytes * 8, r->entropy_count);
-
-- bytes = extract_entropy(r->pull, tmp, bytes,
-+ bytes = extract_entropy(r->pull, u.tmp, bytes,
+@@ -751,7 +878,7 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
+
+ bytes = extract_entropy(r->pull, tmp, bytes,
random_read_wakeup_thresh / 8, rsvd);
- mix_pool_bytes(r, tmp, bytes);
-+ mix_pool_bytes(r, u.tmp, bytes, NULL);
++ mix_pool_bytes(r, tmp, bytes, NULL);
credit_entropy_bits(r, bytes*8);
}
-+ for (i = 0; i < 4; i++)
-+ if (arch_get_random_long(&u.hwrand[i]))
-+ break;
-+ if (i)
-+ mix_pool_bytes(r, &u.hwrand, sizeof(u.hwrand), 0);
}
-
- /*
-@@ -812,9 +935,11 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
+@@ -810,13 +937,19 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
+ static void extract_buf(struct entropy_store *r, __u8 *out)
+ {
int i;
- __u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
+- __u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
++ union {
++ __u32 w[5];
++ unsigned long l[LONGS(EXTRACT_SIZE)];
++ } hash;
++ __u32 workspace[SHA_WORKSPACE_WORDS];
__u8 extract[64];
+ unsigned long flags;
/* Generate a hash across the pool, 16 words (512 bits) at a time */
- sha_init(hash);
+- sha_init(hash);
++ sha_init(hash.w);
+ spin_lock_irqsave(&r->lock, flags);
for (i = 0; i < r->poolinfo->poolwords; i += 16)
- sha_transform(hash, (__u8 *)(r->pool + i), workspace);
+- sha_transform(hash, (__u8 *)(r->pool + i), workspace);
++ sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
-@@ -827,7 +952,8 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
+ /*
+ * We mix the hash back into the pool to prevent backtracking
+@@ -827,13 +960,14 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
* brute-forcing the feedback as hard as brute-forcing the
* hash.
*/
- mix_pool_bytes_extract(r, hash, sizeof(hash), extract);
-+ __mix_pool_bytes(r, hash, sizeof(hash), extract);
++ __mix_pool_bytes(r, hash.w, sizeof(hash.w), extract);
+ spin_unlock_irqrestore(&r->lock, flags);
/*
* To avoid duplicates, we atomically extract a portion of the
-@@ -850,11 +976,10 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
+ * pool while mixing, and hash one final time.
+ */
+- sha_transform(hash, extract, workspace);
++ sha_transform(hash.w, extract, workspace);
+ memset(extract, 0, sizeof(extract));
+ memset(workspace, 0, sizeof(workspace));
+
+@@ -842,19 +976,30 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
+ * pattern, we fold it in half. Thus, we always feed back
+ * twice as much data as we output.
+ */
+- hash[0] ^= hash[3];
+- hash[1] ^= hash[4];
+- hash[2] ^= rol32(hash[2], 16);
+- memcpy(out, hash, EXTRACT_SIZE);
+- memset(hash, 0, sizeof(hash));
++ hash.w[0] ^= hash.w[3];
++ hash.w[1] ^= hash.w[4];
++ hash.w[2] ^= rol32(hash.w[2], 16);
++
++ /*
++ * If we have a architectural hardware random number
++ * generator, mix that in, too.
++ */
++ for (i = 0; i < LONGS(EXTRACT_SIZE); i++) {
++ unsigned long v;
++ if (!arch_get_random_long(&v))
++ break;
++ hash.l[i] ^= v;
++ }
++
++ memcpy(out, &hash, EXTRACT_SIZE);
++ memset(&hash, 0, sizeof(hash));
}
static ssize_t extract_entropy(struct entropy_store *r, void *buf,
@@ -34487,7 +34644,7 @@ index 3a19e2d..8eb80fc 100644
xfer_secondary_pool(r, nbytes);
nbytes = account(r, nbytes, min, reserved);
-@@ -862,7 +987,9 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+@@ -862,7 +1007,9 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
while (nbytes) {
extract_buf(r, tmp);
@@ -34498,7 +34655,7 @@ index 3a19e2d..8eb80fc 100644
spin_lock_irqsave(&r->lock, flags);
if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
panic("Hardware RNG duplicated output!\n");
-@@ -926,7 +1053,21 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
+@@ -926,7 +1073,21 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
*/
void get_random_bytes(void *buf, int nbytes)
{
@@ -34521,7 +34678,7 @@ index 3a19e2d..8eb80fc 100644
}
EXPORT_SYMBOL(get_random_bytes);
-@@ -941,19 +1082,19 @@ EXPORT_SYMBOL(get_random_bytes);
+@@ -941,19 +1102,19 @@ EXPORT_SYMBOL(get_random_bytes);
*/
static void init_std_data(struct entropy_store *r)
{
@@ -34552,7 +34709,7 @@ index 3a19e2d..8eb80fc 100644
}
static int rand_initialize(void)
-@@ -1090,7 +1231,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
+@@ -1090,7 +1251,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
count -= bytes;
p += bytes;
@@ -34561,7 +34718,7 @@ index 3a19e2d..8eb80fc 100644
cond_resched();
}
-@@ -1209,7 +1350,7 @@ EXPORT_SYMBOL(generate_random_uuid);
+@@ -1209,7 +1370,7 @@ EXPORT_SYMBOL(generate_random_uuid);
#include <linux/sysctl.h>
static int min_read_thresh = 8, min_write_thresh;
@@ -34570,7 +34727,7 @@ index 3a19e2d..8eb80fc 100644
static int max_write_thresh = INPUT_POOL_WORDS * 32;
static char sysctl_bootid[16];
-@@ -1231,10 +1372,15 @@ static int proc_do_uuid(ctl_table *table, int write,
+@@ -1231,10 +1392,15 @@ static int proc_do_uuid(ctl_table *table, int write,
uuid = table->data;
if (!uuid) {
uuid = tmp_uuid;
@@ -34589,7 +34746,7 @@ index 3a19e2d..8eb80fc 100644
sprintf(buf, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-"
"%02x%02x%02x%02x%02x%02x",
-@@ -1279,6 +1425,7 @@ static int uuid_strategy(ctl_table *table,
+@@ -1279,6 +1445,7 @@ static int uuid_strategy(ctl_table *table,
}
static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
@@ -34597,7 +34754,7 @@ index 3a19e2d..8eb80fc 100644
ctl_table random_table[] = {
{
.ctl_name = RANDOM_POOLSIZE,
-@@ -1354,12 +1501,17 @@ late_initcall(random_int_secret_init);
+@@ -1354,12 +1521,17 @@ late_initcall(random_int_secret_init);
* value is not cryptographically secure but for several uses the cost of
* depleting entropy is too high
*/
@@ -35490,10 +35647,27 @@ index 7ff6e75..a2965d9 100644
void fw_card_initialize(struct fw_card *card,
const struct fw_card_driver *driver, struct device *device);
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
-index 3a2ccb0..82fd7c4 100644
+index 3a2ccb0..8365cd1 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
-@@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
+@@ -6,6 +6,7 @@
+ #include <linux/efi.h>
+ #include <linux/bootmem.h>
+ #include <linux/slab.h>
++#include <linux/random.h>
+ #include <asm/dmi.h>
+
+ /*
+@@ -111,6 +112,8 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
+
+ dmi_table(buf, dmi_len, dmi_num, decode, NULL);
+
++ add_device_randomness(buf, dmi_len);
++
+ dmi_iounmap(buf, dmi_len);
+ return 0;
+ }
+@@ -391,11 +394,6 @@ void __init dmi_scan_machine(void)
}
}
else {
@@ -35505,7 +35679,7 @@ index 3a2ccb0..82fd7c4 100644
p = dmi_ioremap(0xF0000, 0x10000);
if (p == NULL)
goto error;
-@@ -667,7 +662,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
+@@ -667,7 +665,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
if (buf == NULL)
return -1;
@@ -62700,7 +62874,7 @@ index e4e4d43..66bcbcc 100644
.update_status = aty128_bl_update_status,
};
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
-index 913b4a4..9295a38 100644
+index 913b4a4..4de325a9 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct backlight_device *bd)
@@ -62712,6 +62886,55 @@ index 913b4a4..9295a38 100644
.get_brightness = aty_bl_get_brightness,
.update_status = aty_bl_update_status,
};
+@@ -2970,9 +2970,8 @@ static int __devinit atyfb_setup_sparc(struct pci_dev *pdev,
+ {
+ struct atyfb_par *par = info->par;
+ struct device_node *dp;
+- char prop[128];
+- int node, len, i, j, ret;
+ u32 mem, chip_id;
++ int i, j, ret;
+
+ /*
+ * Map memory-mapped registers.
+@@ -3088,23 +3087,8 @@ static int __devinit atyfb_setup_sparc(struct pci_dev *pdev,
+ aty_st_le32(MEM_CNTL, mem, par);
+ }
+
+- /*
+- * If this is the console device, we will set default video
+- * settings to what the PROM left us with.
+- */
+- node = prom_getchild(prom_root_node);
+- node = prom_searchsiblings(node, "aliases");
+- if (node) {
+- len = prom_getproperty(node, "screen", prop, sizeof(prop));
+- if (len > 0) {
+- prop[len] = '\0';
+- node = prom_finddevice(prop);
+- } else
+- node = 0;
+- }
+-
+ dp = pci_device_to_OF_node(pdev);
+- if (node == dp->node) {
++ if (dp == of_console_device) {
+ struct fb_var_screeninfo *var = &default_var;
+ unsigned int N, P, Q, M, T, R;
+ u32 v_total, h_total;
+@@ -3112,9 +3096,9 @@ static int __devinit atyfb_setup_sparc(struct pci_dev *pdev,
+ u8 pll_regs[16];
+ u8 clock_cntl;
+
+- crtc.vxres = prom_getintdefault(node, "width", 1024);
+- crtc.vyres = prom_getintdefault(node, "height", 768);
+- var->bits_per_pixel = prom_getintdefault(node, "depth", 8);
++ crtc.vxres = of_getintprop_default(dp, "width", 1024);
++ crtc.vyres = of_getintprop_default(dp, "height", 768);
++ var->bits_per_pixel = of_getintprop_default(dp, "depth", 8);
+ var->xoffset = var->yoffset = 0;
+ crtc.h_tot_disp = aty_ld_le32(CRTC_H_TOTAL_DISP, par);
+ crtc.h_sync_strt_wid = aty_ld_le32(CRTC_H_SYNC_STRT_WID, par);
diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
index 1a056ad..221bd6a 100644
--- a/drivers/video/aty/radeon_backlight.c
@@ -66305,7 +66528,7 @@ index 0133b5a..3710d09 100644
(unsigned long) create_aout_tables((char __user *) bprm->p, bprm);
#ifdef __alpha__
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
-index a64fde6..36d9464 100644
+index a64fde6..6583da2 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -31,6 +31,7 @@
@@ -66438,7 +66661,7 @@ index a64fde6..36d9464 100644
error = -ENOMEM;
goto out_close;
}
-@@ -532,6 +558,349 @@ out:
+@@ -532,6 +558,311 @@ out:
return error;
}
@@ -66458,15 +66681,6 @@ index a64fde6..36d9464 100644
+ pax_flags |= MF_PAX_SEGMEXEC;
+#endif
+
-+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
-+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
-+ if (nx_enabled)
-+ pax_flags &= ~MF_PAX_SEGMEXEC;
-+ else
-+ pax_flags &= ~MF_PAX_PAGEEXEC;
-+ }
-+#endif
-+
+#ifdef CONFIG_PAX_EMUTRAMP
+ if (elf_phdata->p_flags & PF_EMUTRAMP)
+ pax_flags |= MF_PAX_EMUTRAMP;
@@ -66500,15 +66714,6 @@ index a64fde6..36d9464 100644
+ pax_flags |= MF_PAX_SEGMEXEC;
+#endif
+
-+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
-+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
-+ if (nx_enabled)
-+ pax_flags &= ~MF_PAX_SEGMEXEC;
-+ else
-+ pax_flags &= ~MF_PAX_PAGEEXEC;
-+ }
-+#endif
-+
+#ifdef CONFIG_PAX_EMUTRAMP
+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
+ pax_flags |= MF_PAX_EMUTRAMP;
@@ -66544,15 +66749,6 @@ index a64fde6..36d9464 100644
+ pax_flags |= MF_PAX_SEGMEXEC;
+#endif
+
-+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
-+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
-+ if (nx_enabled)
-+ pax_flags &= ~MF_PAX_SEGMEXEC;
-+ else
-+ pax_flags &= ~MF_PAX_PAGEEXEC;
-+ }
-+#endif
-+
+#ifdef CONFIG_PAX_EMUTRAMP
+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
+ pax_flags |= MF_PAX_EMUTRAMP;
@@ -66586,15 +66782,6 @@ index a64fde6..36d9464 100644
+ pax_flags |= MF_PAX_SEGMEXEC;
+#endif
+
-+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
-+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
-+ if ((__supported_pte_mask & _PAGE_NX))
-+ pax_flags &= ~MF_PAX_SEGMEXEC;
-+ else
-+ pax_flags &= ~MF_PAX_PAGEEXEC;
-+ }
-+#endif
-+
+#ifdef CONFIG_PAX_EMUTRAMP
+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
+ pax_flags |= MF_PAX_EMUTRAMP;
@@ -66614,7 +66801,7 @@ index a64fde6..36d9464 100644
+}
+#endif
+
-+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
+{
+ unsigned long pax_flags = 0UL;
@@ -66631,15 +66818,6 @@ index a64fde6..36d9464 100644
+ pax_flags |= MF_PAX_SEGMEXEC;
+#endif
+
-+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
-+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
-+ if ((__supported_pte_mask & _PAGE_NX))
-+ pax_flags &= ~MF_PAX_SEGMEXEC;
-+ else
-+ pax_flags &= ~MF_PAX_PAGEEXEC;
-+ }
-+#endif
-+
+#ifdef CONFIG_PAX_EMUTRAMP
+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
+ pax_flags |= MF_PAX_EMUTRAMP;
@@ -66661,19 +66839,17 @@ index a64fde6..36d9464 100644
+ pax_flags |= MF_PAX_PAGEEXEC;
+#endif
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
+#ifdef CONFIG_PAX_MPROTECT
+ pax_flags |= MF_PAX_MPROTECT;
+#endif
+
+#ifdef CONFIG_PAX_RANDMMAP
-+ pax_flags |= MF_PAX_RANDMMAP;
-+#endif
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
-+ pax_flags &= ~MF_PAX_PAGEEXEC;
-+ pax_flags |= MF_PAX_SEGMEXEC;
-+ }
++ if (randomize_va_space)
++ pax_flags |= MF_PAX_RANDMMAP;
+#endif
+
+#endif
@@ -66777,6 +66953,15 @@ index a64fde6..36d9464 100644
+ if (pt_pax_flags != ~0UL)
+ pax_flags = pt_pax_flags;
+
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ if ((__supported_pte_mask & _PAGE_NX))
++ pax_flags &= ~MF_PAX_SEGMEXEC;
++ else
++ pax_flags &= ~MF_PAX_PAGEEXEC;
++ }
++#endif
++
+ if (0 > pax_check_flags(&pax_flags))
+ return -EINVAL;
+
@@ -66788,7 +66973,7 @@ index a64fde6..36d9464 100644
/*
* These are the functions used to load ELF style executables and shared
* libraries. There is no binary dependent code anywhere else.
-@@ -548,6 +917,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
+@@ -548,6 +879,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
{
unsigned int random_variable = 0;
@@ -66800,7 +66985,7 @@ index a64fde6..36d9464 100644
if ((current->flags & PF_RANDOMIZE) &&
!(current->personality & ADDR_NO_RANDOMIZE)) {
random_variable = get_random_int() & STACK_RND_MASK;
-@@ -566,7 +940,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -566,7 +902,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
unsigned long load_addr = 0, load_bias = 0;
int load_addr_set = 0;
char * elf_interpreter = NULL;
@@ -66809,7 +66994,7 @@ index a64fde6..36d9464 100644
struct elf_phdr *elf_ppnt, *elf_phdata;
unsigned long elf_bss, elf_brk;
int retval, i;
-@@ -576,11 +950,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -576,11 +912,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
unsigned long start_code, end_code, start_data, end_data;
unsigned long reloc_func_desc = 0;
int executable_stack = EXSTACK_DEFAULT;
@@ -66822,7 +67007,7 @@ index a64fde6..36d9464 100644
loc = kmalloc(sizeof(*loc), GFP_KERNEL);
if (!loc) {
-@@ -718,11 +1092,80 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -718,11 +1054,80 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
/* OK, This is the point of no return */
current->flags &= ~PF_FORKNOEXEC;
@@ -66847,7 +67032,7 @@ index a64fde6..36d9464 100644
+
+ current->mm->def_flags = 0;
+
-+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
+ send_sig(SIGKILL, current, 0);
+ goto out_free_dentry;
@@ -66904,7 +67089,7 @@ index a64fde6..36d9464 100644
if (elf_read_implies_exec(loc->elf_ex, executable_stack))
current->personality |= READ_IMPLIES_EXEC;
-@@ -800,10 +1243,27 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -800,10 +1205,27 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
* might try to exec. This is because the brk will
* follow the loader, and is not movable. */
#ifdef CONFIG_X86
@@ -66933,7 +67118,7 @@ index a64fde6..36d9464 100644
}
error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
-@@ -836,9 +1296,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -836,9 +1258,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
* allowed task size. Note that p_filesz must always be
* <= p_memsz so it is only necessary to check p_memsz.
*/
@@ -66946,7 +67131,7 @@ index a64fde6..36d9464 100644
/* set_brk can never work. Avoid overflows. */
send_sig(SIGKILL, current, 0);
retval = -EINVAL;
-@@ -877,11 +1337,40 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -877,11 +1299,40 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
goto out_free_dentry;
}
if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
@@ -66990,7 +67175,7 @@ index a64fde6..36d9464 100644
if (elf_interpreter) {
unsigned long uninitialized_var(interp_map_addr);
-@@ -1112,8 +1601,10 @@ static int dump_seek(struct file *file, loff_t off)
+@@ -1112,8 +1563,10 @@ static int dump_seek(struct file *file, loff_t off)
unsigned long n = off;
if (n > PAGE_SIZE)
n = PAGE_SIZE;
@@ -67002,7 +67187,7 @@ index a64fde6..36d9464 100644
off -= n;
}
free_page((unsigned long)buf);
-@@ -1125,7 +1616,7 @@ static int dump_seek(struct file *file, loff_t off)
+@@ -1125,7 +1578,7 @@ static int dump_seek(struct file *file, loff_t off)
* Decide what to dump of a segment, part, all or none.
*/
static unsigned long vma_dump_size(struct vm_area_struct *vma,
@@ -67011,7 +67196,7 @@ index a64fde6..36d9464 100644
{
#define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
-@@ -1159,7 +1650,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
+@@ -1159,7 +1612,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
if (vma->vm_file == NULL)
return 0;
@@ -67020,7 +67205,7 @@ index a64fde6..36d9464 100644
goto whole;
/*
-@@ -1255,8 +1746,11 @@ static int writenote(struct memelfnote *men, struct file *file,
+@@ -1255,8 +1708,11 @@ static int writenote(struct memelfnote *men, struct file *file,
#undef DUMP_WRITE
#define DUMP_WRITE(addr, nr) \
@@ -67033,7 +67218,7 @@ index a64fde6..36d9464 100644
static void fill_elf_header(struct elfhdr *elf, int segs,
u16 machine, u32 flags, u8 osabi)
-@@ -1385,9 +1879,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
+@@ -1385,9 +1841,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
{
elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
int i = 0;
@@ -67045,7 +67230,7 @@ index a64fde6..36d9464 100644
fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
}
-@@ -1973,7 +2467,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
+@@ -1973,7 +2429,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
phdr.p_offset = offset;
phdr.p_vaddr = vma->vm_start;
phdr.p_paddr = 0;
@@ -67054,7 +67239,7 @@ index a64fde6..36d9464 100644
phdr.p_memsz = vma->vm_end - vma->vm_start;
offset += phdr.p_filesz;
phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
-@@ -2006,7 +2500,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
+@@ -2006,7 +2462,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
unsigned long addr;
unsigned long end;
@@ -67063,7 +67248,7 @@ index a64fde6..36d9464 100644
for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
struct page *page;
-@@ -2015,6 +2509,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
+@@ -2015,6 +2471,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
page = get_dump_page(addr);
if (page) {
void *kaddr = kmap(page);
@@ -67071,7 +67256,7 @@ index a64fde6..36d9464 100644
stop = ((size += PAGE_SIZE) > limit) ||
!dump_write(file, kaddr, PAGE_SIZE);
kunmap(page);
-@@ -2042,6 +2537,97 @@ out:
+@@ -2042,6 +2499,97 @@ out:
#endif /* USE_ELF_CORE_DUMP */
@@ -68898,7 +69083,7 @@ index f539204..068db1f 100644
fput(tfile);
diff --git a/fs/exec.c b/fs/exec.c
-index 86fafc6..d54d849 100644
+index 86fafc6..a9275f4 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -56,12 +56,33 @@
@@ -69390,7 +69575,7 @@ index 86fafc6..d54d849 100644
out:
if (bprm->mm) {
acct_arg_size(bprm, 0);
-@@ -1591,6 +1746,229 @@ out:
+@@ -1591,6 +1746,251 @@ out:
return ispipe;
}
@@ -69535,7 +69720,7 @@ index 86fafc6..d54d849 100644
+
+#ifdef CONFIG_PAX_USERCOPY
+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
-+int object_is_on_stack(const void *obj, unsigned long len)
++static noinline int check_stack_object(const void *obj, unsigned long len)
+{
+ const void * const stack = task_stack_page(current);
+ const void * const stackend = stack + THREAD_SIZE;
@@ -69581,7 +69766,7 @@ index 86fafc6..d54d849 100644
+#endif
+}
+
-+__noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
++static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
+{
+ if (current->signal->curr_ip)
+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
@@ -69596,6 +69781,28 @@ index 86fafc6..d54d849 100644
+}
+#endif
+
++void check_object_size(const void *ptr, unsigned long n, bool to)
++{
++
++#ifdef CONFIG_PAX_USERCOPY
++ const char *type;
++
++ if (!n)
++ return;
++
++ type = check_heap_object(ptr, n, to);
++ if (!type) {
++ if (check_stack_object(ptr, n) != -1)
++ return;
++ type = "<process stack>";
++ }
++
++ pax_report_usercopy(ptr, n, to, type);
++#endif
++
++}
++EXPORT_SYMBOL(check_object_size);
++
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
+void pax_track_stack(void)
+{
@@ -69620,7 +69827,7 @@ index 86fafc6..d54d849 100644
static int zap_process(struct task_struct *start)
{
struct task_struct *t;
-@@ -1793,17 +2171,17 @@ static void wait_for_dump_helpers(struct file *file)
+@@ -1793,17 +2193,17 @@ static void wait_for_dump_helpers(struct file *file)
pipe = file->f_path.dentry->d_inode->i_pipe;
pipe_lock(pipe);
@@ -69643,7 +69850,7 @@ index 86fafc6..d54d849 100644
pipe_unlock(pipe);
}
-@@ -1826,10 +2204,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+@@ -1826,10 +2226,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
char **helper_argv = NULL;
int helper_argc = 0;
int dump_count = 0;
@@ -69658,7 +69865,7 @@ index 86fafc6..d54d849 100644
binfmt = mm->binfmt;
if (!binfmt || !binfmt->core_dump)
goto fail;
-@@ -1874,6 +2255,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+@@ -1874,6 +2277,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
*/
clear_thread_flag(TIF_SIGPENDING);
@@ -69667,7 +69874,7 @@ index 86fafc6..d54d849 100644
/*
* lock_kernel() because format_corename() is controlled by sysctl, which
* uses lock_kernel()
-@@ -1908,7 +2291,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+@@ -1908,7 +2313,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
goto fail_unlock;
}
@@ -69676,7 +69883,7 @@ index 86fafc6..d54d849 100644
if (core_pipe_limit && (core_pipe_limit < dump_count)) {
printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
task_tgid_vnr(current), current->comm);
-@@ -1972,7 +2355,7 @@ close_fail:
+@@ -1972,7 +2377,7 @@ close_fail:
filp_close(file, NULL);
fail_dropcount:
if (dump_count)
@@ -86059,7 +86266,7 @@ index f4906f6..71feb73 100644
{
return -ENODEV;
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
-index b7babf0..a9ac9fc 100644
+index b7babf0..1df7140 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -22,6 +22,12 @@
@@ -86280,7 +86487,7 @@ index b7babf0..a9ac9fc 100644
static inline long atomic_long_dec_return(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
-@@ -255,4 +375,47 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
+@@ -255,4 +375,53 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
#endif /* BITS_PER_LONG == 64 */
@@ -86298,6 +86505,10 @@ index b7babf0..a9ac9fc 100644
+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
++#ifdef CONFIG_X86
++ atomic_clear_mask_unchecked(0, NULL);
++ atomic_set_mask_unchecked(0, NULL);
++#endif
+
+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
@@ -86318,6 +86529,8 @@ index b7babf0..a9ac9fc 100644
+#define atomic_dec_unchecked(v) atomic_dec(v)
+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
++#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
++#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
+
+#define atomic_long_read_unchecked(v) atomic_long_read(v)
+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
@@ -86328,6 +86541,19 @@ index b7babf0..a9ac9fc 100644
+#endif
+
#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
+diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
+index c99c64d..f173e40 100644
+--- a/include/asm-generic/atomic.h
++++ b/include/asm-generic/atomic.h
+@@ -134,7 +134,7 @@ static inline void atomic_dec(atomic_t *v)
+
+ #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
++static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+ {
+ unsigned long flags;
+
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index b18ce4f..2ee2843 100644
--- a/include/asm-generic/atomic64.h
@@ -87030,20 +87256,25 @@ index c8f2a5f7..1618a5c 100644
/* audit system wants to get cap info from files as well */
struct dentry;
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
-index 450fa59..7c875cb 100644
+index 450fa59..b658078 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
-@@ -14,6 +14,9 @@
+@@ -14,6 +14,14 @@
#define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
#define __always_inline inline __attribute__((always_inline))
+#ifdef SIZE_OVERFLOW_PLUGIN
+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
+#endif
++
++#ifdef LATENT_ENTROPY_PLUGIN
++#define __latent_entropy __attribute__((latent_entropy))
++#endif
++
/*
* A trick to suppress uninitialized variable warning without generating any
* code
-@@ -36,4 +39,23 @@
+@@ -36,4 +44,23 @@
the kernel context */
#define __cold __attribute__((__cold__))
@@ -87068,7 +87299,7 @@ index 450fa59..7c875cb 100644
+#define __compiletime_error(message) __attribute__((error(message)))
#endif
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
-index 04fb513..edaeada 100644
+index 04fb513..7ab44ac 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -5,11 +5,14 @@
@@ -87121,7 +87352,7 @@ index 04fb513..edaeada 100644
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
# define __builtin_warning(x, y...) (1)
-@@ -247,6 +271,17 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+@@ -247,6 +271,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
# define __attribute_const__ /* unimplemented */
#endif
@@ -87136,10 +87367,15 @@ index 04fb513..edaeada 100644
+#ifndef __size_overflow
+# define __size_overflow(...)
+#endif
++
++#ifndef __latent_entropy
++# define __latent_entropy
++#endif
++
/*
* Tell gcc if a function is cold. The compiler will assume any path
* directly leading to the call is unlikely.
-@@ -256,6 +291,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+@@ -256,6 +296,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
#define __cold
#endif
@@ -87162,7 +87398,7 @@ index 04fb513..edaeada 100644
/* Simple shorthand for a section definition */
#ifndef __section
# define __section(S) __attribute__ ((__section__(#S)))
-@@ -266,6 +317,19 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+@@ -266,6 +322,19 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#endif
@@ -87182,7 +87418,7 @@ index 04fb513..edaeada 100644
/*
* Prevent the compiler from merging or refetching accesses. The compiler
* is also forbidden from reordering successive instances of ACCESS_ONCE(),
-@@ -278,6 +342,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+@@ -278,6 +347,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
* use is to mediate communication between process-level code and irq/NMI
* handlers, all running on the same CPU.
*/
@@ -87662,6 +87898,41 @@ index 297df45..b6a74ff 100644
struct work_struct async_notify;
#ifdef CONFIG_BLK_DEV_INTEGRITY
struct blk_integrity *integrity;
+diff --git a/include/linux/gfp.h b/include/linux/gfp.h
+index 557bdad..b5e8c98 100644
+--- a/include/linux/gfp.h
++++ b/include/linux/gfp.h
+@@ -53,6 +53,12 @@ struct vm_area_struct;
+ #define __GFP_THISNODE ((__force gfp_t)0x40000u)/* No fallback, no policies */
+ #define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */
+
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++#define __GFP_USERCOPY ((__force gfp_t)0x1000000u)
++#else
++#define __GFP_USERCOPY ((__force gfp_t)0)
++#endif
++
+ #ifdef CONFIG_KMEMCHECK
+ #define __GFP_NOTRACK ((__force gfp_t)0x200000u) /* Don't track with kmemcheck */
+ #else
+@@ -65,7 +71,7 @@ struct vm_area_struct;
+ */
+ #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
+
+-#define __GFP_BITS_SHIFT 22 /* Room for 22 __GFP_FOO bits */
++#define __GFP_BITS_SHIFT 26 /* Room for 26 __GFP_FOO bits */
+ #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
+
+ /* This equals 0, but use constants in case they ever change */
+@@ -115,6 +121,8 @@ struct vm_area_struct;
+ /* 4GB DMA on some platforms */
+ #define GFP_DMA32 __GFP_DMA32
+
++#define GFP_USERCOPY __GFP_USERCOPY
++
+ /* Convert GFP flags to their corresponding migrate type */
+ static inline int allocflags_to_migratetype(gfp_t gfp_flags)
+ {
diff --git a/include/linux/gracl.h b/include/linux/gracl.h
new file mode 100644
index 0000000..fc80ba3
@@ -88779,6 +89050,54 @@ index 4c4e57d..f3c5303 100644
struct list_head context_list; /* list of context id's
and pointers */
#endif
+diff --git a/include/linux/init.h b/include/linux/init.h
+index ff8bde5..0296174 100644
+--- a/include/linux/init.h
++++ b/include/linux/init.h
+@@ -38,9 +38,15 @@
+ * Also note, that this data cannot be "const".
+ */
+
++#ifdef MODULE
++#define add_latent_entropy
++#else
++#define add_latent_entropy __latent_entropy
++#endif
++
+ /* These are for everybody (although not all archs will actually
+ discard it in modules) */
+-#define __init __section(.init.text) __cold notrace
++#define __init __section(.init.text) __cold notrace add_latent_entropy
+ #define __initdata __section(.init.data)
+ #define __initconst __section(.init.rodata)
+ #define __exitdata __section(.exit.data)
+@@ -75,7 +81,7 @@
+ #define __exit __section(.exit.text) __exitused __cold
+
+ /* Used for HOTPLUG */
+-#define __devinit __section(.devinit.text) __cold
++#define __devinit __section(.devinit.text) __cold add_latent_entropy
+ #define __devinitdata __section(.devinit.data)
+ #define __devinitconst __section(.devinit.rodata)
+ #define __devexit __section(.devexit.text) __exitused __cold
+@@ -83,7 +89,7 @@
+ #define __devexitconst __section(.devexit.rodata)
+
+ /* Used for HOTPLUG_CPU */
+-#define __cpuinit __section(.cpuinit.text) __cold
++#define __cpuinit __section(.cpuinit.text) __cold add_latent_entropy
+ #define __cpuinitdata __section(.cpuinit.data)
+ #define __cpuinitconst __section(.cpuinit.rodata)
+ #define __cpuexit __section(.cpuexit.text) __exitused __cold
+@@ -91,7 +97,7 @@
+ #define __cpuexitconst __section(.cpuexit.rodata)
+
+ /* Used for MEMORY_HOTPLUG */
+-#define __meminit __section(.meminit.text) __cold
++#define __meminit __section(.meminit.text) __cold add_latent_entropy
+ #define __meminitdata __section(.meminit.data)
+ #define __meminitconst __section(.meminit.rodata)
+ #define __memexit __section(.memexit.text) __exitused __cold
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 21a6f5d..7c7d19f 100644
--- a/include/linux/init_task.h
@@ -89837,10 +90156,10 @@ index 7456d7d..6c1cfc9 100644
static inline int ptrace_reparented(struct task_struct *child)
{
diff --git a/include/linux/random.h b/include/linux/random.h
-index 2948046..6fe7065 100644
+index 2948046..16bad29 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
-@@ -46,9 +46,10 @@ struct rand_pool_info {
+@@ -46,9 +46,14 @@ struct rand_pool_info {
extern void rand_initialize_irq(int irq);
@@ -89849,10 +90168,14 @@ index 2948046..6fe7065 100644
unsigned int value);
-extern void add_interrupt_randomness(int irq);
+extern void add_interrupt_randomness(int irq, int irq_flags);
++
++#ifdef CONFIG_PAX_LATENT_ENTROPY
++extern void transfer_latent_entropy(void);
++#endif
extern void get_random_bytes(void *buf, int nbytes);
void generate_random_uuid(unsigned char uuid_out[16]);
-@@ -63,6 +64,24 @@ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned l
+@@ -63,6 +68,24 @@ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned l
u32 random32(void);
void srandom32(u32 seed);
@@ -90005,7 +90328,7 @@ index 3392c59..a746428 100644
#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
/**
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 71849bf..90ac063 100644
+index 71849bf..903514a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -101,6 +101,7 @@ struct bio;
@@ -90208,7 +90531,7 @@ index 71849bf..90ac063 100644
+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
+extern void pax_report_refcount_overflow(struct pt_regs *regs);
-+extern __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
++extern void check_object_size(const void *ptr, unsigned long n, bool to);
+
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
+extern void pax_track_stack(void);
@@ -90255,7 +90578,7 @@ index 71849bf..90ac063 100644
extern void daemonize(const char *, ...);
extern int allow_signal(int);
-@@ -2284,13 +2384,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
+@@ -2284,9 +2384,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
#endif
@@ -90267,15 +90590,7 @@ index 71849bf..90ac063 100644
return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}
-
-+#ifdef CONFIG_PAX_USERCOPY
-+extern int object_is_on_stack(const void *obj, unsigned long len);
-+#endif
-+
- extern void thread_info_cache_init(void);
-
- #ifdef CONFIG_DEBUG_STACK_USAGE
-@@ -2616,6 +2720,23 @@ static inline unsigned long rlimit_max(unsigned int limit)
+@@ -2616,6 +2716,23 @@ static inline unsigned long rlimit_max(unsigned int limit)
return task_rlimit_max(current, limit);
}
@@ -90481,7 +90796,7 @@ index bcdd660..fd2e332 100644
/**
diff --git a/include/linux/slab.h b/include/linux/slab.h
-index 2da8372..96b37db 100644
+index 2da8372..a462292 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -11,12 +11,20 @@
@@ -90496,7 +90811,7 @@ index 2da8372..96b37db 100644
*/
#define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
+
-+#ifdef CONFIG_PAX_USERCOPY
++#ifdef CONFIG_PAX_USERCOPY_SLABS
+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
+#else
+#define SLAB_USERCOPY 0x00000000UL
@@ -90522,15 +90837,16 @@ index 2da8372..96b37db 100644
/*
* struct kmem_cache related prototypes
-@@ -138,6 +149,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
+@@ -138,6 +149,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);
-+void check_object_size(const void *ptr, unsigned long n, bool to);
++const char *check_heap_object(const void *ptr, unsigned long n, bool to);
++bool is_usercopy_object(const void *ptr);
/*
* Allocator specific definitions. These are mainly used to establish optimized
-@@ -263,7 +275,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
+@@ -263,7 +276,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
* request comes from.
*/
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
@@ -90539,7 +90855,7 @@ index 2da8372..96b37db 100644
#define kmalloc_track_caller(size, flags) \
__kmalloc_track_caller(size, flags, _RET_IP_)
#else
-@@ -281,7 +293,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
+@@ -281,7 +294,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
* allocation request comes from.
*/
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
@@ -90549,7 +90865,7 @@ index 2da8372..96b37db 100644
__kmalloc_node_track_caller(size, flags, node, \
_RET_IP_)
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
-index 850d057..6de7888 100644
+index 850d057..aa58075 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -69,10 +69,10 @@ struct kmem_cache {
@@ -90567,7 +90883,16 @@ index 850d057..6de7888 100644
/*
* If debugging is enabled, then the allocator can add additional
-@@ -108,7 +108,7 @@ struct cache_sizes {
+@@ -104,11 +104,16 @@ struct cache_sizes {
+ #ifdef CONFIG_ZONE_DMA
+ struct kmem_cache *cs_dmacachep;
+ #endif
++
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++ struct kmem_cache *cs_usercopycachep;
++#endif
++
+ };
extern struct cache_sizes malloc_sizes[];
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
@@ -90576,7 +90901,21 @@ index 850d057..6de7888 100644
#ifdef CONFIG_KMEMTRACE
extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
-@@ -163,7 +163,7 @@ found:
+@@ -150,6 +155,13 @@ found:
+ cachep = malloc_sizes[i].cs_dmacachep;
+ else
+ #endif
++
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++ if (flags & GFP_USERCOPY)
++ cachep = malloc_sizes[i].cs_usercopycachep;
++ else
++#endif
++
+ cachep = malloc_sizes[i].cs_cachep;
+
+ ret = kmem_cache_alloc_notrace(cachep, flags);
+@@ -163,7 +175,7 @@ found:
}
#ifdef CONFIG_NUMA
@@ -90585,6 +90924,20 @@ index 850d057..6de7888 100644
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
#ifdef CONFIG_KMEMTRACE
+@@ -205,6 +217,13 @@ found:
+ cachep = malloc_sizes[i].cs_dmacachep;
+ else
+ #endif
++
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++ if (flags & GFP_USERCOPY)
++ cachep = malloc_sizes[i].cs_usercopycachep;
++ else
++#endif
++
+ cachep = malloc_sizes[i].cs_cachep;
+
+ ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index 0ec00b3..39cb7fc 100644
--- a/include/linux/slob_def.h
@@ -92254,7 +92607,7 @@ index 1fd59b8..a01b079 100644
next_state = Reset;
return 0;
diff --git a/init/main.c b/init/main.c
-index 1eb4bd5..fea5bbe 100644
+index 1eb4bd5..7bc6316 100644
--- a/init/main.c
+++ b/init/main.c
@@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void) { }
@@ -92385,7 +92738,40 @@ index 1eb4bd5..fea5bbe 100644
}
-@@ -893,11 +938,13 @@ static int __init kernel_init(void * unused)
+@@ -760,9 +805,15 @@ static void __init do_initcalls(void)
+ {
+ initcall_t *call;
+
+- for (call = __early_initcall_end; call < __initcall_end; call++)
++ for (call = __early_initcall_end; call < __initcall_end; call++) {
+ do_one_initcall(*call);
+
++#ifdef CONFIG_PAX_LATENT_ENTROPY
++ transfer_latent_entropy();
++#endif
++
++ }
++
+ /* Make sure there is no pending stuff from the initcall sequence */
+ flush_scheduled_work();
+ }
+@@ -790,8 +841,14 @@ static void __init do_pre_smp_initcalls(void)
+ {
+ initcall_t *call;
+
+- for (call = __initcall_start; call < __early_initcall_end; call++)
++ for (call = __initcall_start; call < __early_initcall_end; call++) {
+ do_one_initcall(*call);
++
++#ifdef CONFIG_PAX_LATENT_ENTROPY
++ transfer_latent_entropy();
++#endif
++
++ }
+ }
+
+ static void run_init_process(char *init_filename)
+@@ -893,11 +950,13 @@ static int __init kernel_init(void * unused)
if (!ramdisk_execute_command)
ramdisk_execute_command = "/init";
@@ -95662,7 +96048,7 @@ index 40dd021..fb30ceb 100644
mutex_lock(&pm_mutex);
suspend_ops = ops;
diff --git a/kernel/printk.c b/kernel/printk.c
-index 4cade47..4d17900 100644
+index 4cade47..4ddd097 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -33,6 +33,7 @@
@@ -95793,6 +96179,36 @@ index 4cade47..4d17900 100644
}
/*
+@@ -1153,6 +1154,18 @@ void console_start(struct console *console)
+ }
+ EXPORT_SYMBOL(console_start);
+
++static int __read_mostly keep_bootcon = 0;
++
++static int __init keep_bootcon_setup(char *str)
++{
++ keep_bootcon = 1;
++ printk(KERN_INFO "debug: skip boot console de-registration.\n");
++
++ return 0;
++}
++
++early_param("keep_bootcon", keep_bootcon_setup);
++
+ /*
+ * The console driver calls this routine during kernel initialization
+ * to register the console printing procedure with printk() and to
+@@ -1299,7 +1312,9 @@ void register_console(struct console *newcon)
+ * users know there might be something in the kernel's log buffer that
+ * went to the bootconsole (that they do not see on the real console)
+ */
+- if (bcon && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV)) {
++ if (bcon &&
++ ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) &&
++ !keep_bootcon) {
+ /* we need to iterate through twice, to make sure we print
+ * everything out, before we unregister the console(s)
+ */
diff --git a/kernel/profile.c b/kernel/profile.c
index dfadc5b..7f59404 100644
--- a/kernel/profile.c
@@ -99244,7 +99660,7 @@ index aaca868..2ebecdc 100644
err = -EPERM;
goto out;
diff --git a/mm/mlock.c b/mm/mlock.c
-index 2d846cf..98134d2 100644
+index 2d846cf..8d5cdd8 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -13,6 +13,7 @@
@@ -99287,7 +99703,7 @@ index 2d846cf..98134d2 100644
unsigned long nstart, end, tmp;
struct vm_area_struct * vma, * prev;
- int error;
-+ int error = -EINVAL;
++ int error = 0;
len = PAGE_ALIGN(len);
end = start + len;
@@ -101176,7 +101592,7 @@ index 3e0005b..1d659a8 100644
return -ENOMEM;
diff --git a/mm/slab.c b/mm/slab.c
-index c8d466a..909e01e 100644
+index c8d466a..60546da 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -174,7 +174,7 @@
@@ -101230,7 +101646,33 @@ index c8d466a..909e01e 100644
{
u32 offset = (obj - slab->s_mem);
return reciprocal_divide(offset, cache->reciprocal_buffer_size);
-@@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
+@@ -579,10 +579,11 @@ EXPORT_SYMBOL(malloc_sizes);
+ struct cache_names {
+ char *name;
+ char *name_dma;
++ char *name_usercopy;
+ };
+
+ static struct cache_names __initdata cache_names[] = {
+-#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
++#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)", .name_usercopy = "size-" #x "(USERCOPY)" },
+ #include <linux/kmalloc_sizes.h>
+ {NULL,}
+ #undef CACHE
+@@ -719,6 +720,12 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
+ if (unlikely(gfpflags & GFP_DMA))
+ return csizep->cs_dmacachep;
+ #endif
++
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++ if (unlikely(gfpflags & GFP_USERCOPY))
++ return csizep->cs_usercopycachep;
++#endif
++
+ return csizep->cs_cachep;
+ }
+
+@@ -1453,7 +1460,7 @@ void __init kmem_cache_init(void)
sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
sizes[INDEX_AC].cs_size,
ARCH_KMALLOC_MINALIGN,
@@ -101239,7 +101681,7 @@ index c8d466a..909e01e 100644
NULL);
if (INDEX_AC != INDEX_L3) {
-@@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
+@@ -1461,7 +1468,7 @@ void __init kmem_cache_init(void)
kmem_cache_create(names[INDEX_L3].name,
sizes[INDEX_L3].cs_size,
ARCH_KMALLOC_MINALIGN,
@@ -101248,7 +101690,7 @@ index c8d466a..909e01e 100644
NULL);
}
-@@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
+@@ -1479,7 +1486,7 @@ void __init kmem_cache_init(void)
sizes->cs_cachep = kmem_cache_create(names->name,
sizes->cs_size,
ARCH_KMALLOC_MINALIGN,
@@ -101257,7 +101699,24 @@ index c8d466a..909e01e 100644
NULL);
}
#ifdef CONFIG_ZONE_DMA
-@@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, void *p)
+@@ -1491,6 +1498,16 @@ void __init kmem_cache_init(void)
+ SLAB_PANIC,
+ NULL);
+ #endif
++
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++ sizes->cs_usercopycachep = kmem_cache_create(
++ names->name_usercopy,
++ sizes->cs_size,
++ ARCH_KMALLOC_MINALIGN,
++ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
++ NULL);
++#endif
++
+ sizes++;
+ names++;
+ }
+@@ -4211,10 +4228,10 @@ static int s_show(struct seq_file *m, void *p)
}
/* cpu stats */
{
@@ -101272,7 +101731,7 @@ index c8d466a..909e01e 100644
seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
allochit, allocmiss, freehit, freemiss);
-@@ -4471,15 +4471,70 @@ static const struct file_operations proc_slabstats_operations = {
+@@ -4471,15 +4488,76 @@ static const struct file_operations proc_slabstats_operations = {
static int __init slab_proc_init(void)
{
@@ -101293,60 +101752,66 @@ index c8d466a..909e01e 100644
module_init(slab_proc_init);
#endif
-+void check_object_size(const void *ptr, unsigned long n, bool to)
++bool is_usercopy_object(const void *ptr)
+{
++ struct page *page;
++ struct kmem_cache *cachep;
++
++ if (ZERO_OR_NULL_PTR(ptr))
++ return false;
++
++ if (!virt_addr_valid(ptr))
++ return false;
++
++ page = virt_to_head_page(ptr);
++
++ if (!PageSlab(page))
++ return false;
++
++ cachep = page_get_cache(page);
++ return cachep->flags & SLAB_USERCOPY;
++}
+
+#ifdef CONFIG_PAX_USERCOPY
++const char *check_heap_object(const void *ptr, unsigned long n, bool to)
++{
+ struct page *page;
-+ struct kmem_cache *cachep = NULL;
++ struct kmem_cache *cachep;
+ struct slab *slabp;
+ unsigned int objnr;
+ unsigned long offset;
-+ const char *type;
+
-+ if (!n)
-+ return;
-+
-+ type = "<null>";
+ if (ZERO_OR_NULL_PTR(ptr))
-+ goto report;
++ return "<null>";
+
+ if (!virt_addr_valid(ptr))
-+ return;
++ return NULL;
+
+ page = virt_to_head_page(ptr);
+
-+ type = "<process stack>";
-+ if (!PageSlab(page)) {
-+ if (object_is_on_stack(ptr, n) == -1)
-+ goto report;
-+ return;
-+ }
++ if (!PageSlab(page))
++ return NULL;
+
+ cachep = page_get_cache(page);
-+ type = cachep->name;
+ if (!(cachep->flags & SLAB_USERCOPY))
-+ goto report;
++ return cachep->name;
+
+ slabp = page_get_slab(page);
+ objnr = obj_to_index(cachep, slabp, ptr);
+ BUG_ON(objnr >= cachep->num);
+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
-+ return;
-+
-+report:
-+ pax_report_usercopy(ptr, n, to, type);
-+#endif
++ return NULL;
+
++ return cachep->name;
+}
-+EXPORT_SYMBOL(check_object_size);
++#endif
+
/**
* ksize - get the actual amount of memory allocated for a given object
* @objp: Pointer to the object
diff --git a/mm/slob.c b/mm/slob.c
-index 837ebd6..0bd23bc 100644
+index 837ebd6..d24d63b 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -29,7 +29,7 @@
@@ -101497,7 +101962,7 @@ index 837ebd6..0bd23bc 100644
return ret;
}
EXPORT_SYMBOL(__kmalloc_node);
-@@ -528,13 +542,92 @@ void kfree(const void *block)
+@@ -528,13 +542,83 @@ void kfree(const void *block)
sp = slob_page(block);
if (is_slob_page(sp)) {
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
@@ -101515,40 +101980,34 @@ index 837ebd6..0bd23bc 100644
}
EXPORT_SYMBOL(kfree);
-+void check_object_size(const void *ptr, unsigned long n, bool to)
++bool is_usercopy_object(const void *ptr)
+{
++ return false;
++}
+
+#ifdef CONFIG_PAX_USERCOPY
++const char *check_heap_object(const void *ptr, unsigned long n, bool to)
++{
+ struct slob_page *sp;
+ const slob_t *free;
+ const void *base;
+ unsigned long flags;
-+ const char *type;
-+
-+ if (!n)
-+ return;
+
-+ type = "<null>";
+ if (ZERO_OR_NULL_PTR(ptr))
-+ goto report;
++ return "<null>";
+
+ if (!virt_addr_valid(ptr))
-+ return;
++ return NULL;
+
-+ type = "<process stack>";
+ sp = slob_page(ptr);
-+ if (!PageSlab((struct page *)sp)) {
-+ if (object_is_on_stack(ptr, n) == -1)
-+ goto report;
-+ return;
-+ }
++ if (!PageSlab((struct page *)sp))
++ return NULL;
+
-+ type = "<slob>";
+ if (sp->size) {
+ base = page_address(&sp->page);
+ if (base <= ptr && n <= sp->size - (ptr - base))
-+ return;
-+ goto report;
++ return NULL;
++ return "<slob>";
+ }
+
+ /* some tricky double walking to find the chunk */
@@ -101579,21 +102038,18 @@ index 837ebd6..0bd23bc 100644
+ break;
+
+ spin_unlock_irqrestore(&slob_lock, flags);
-+ return;
++ return NULL;
+ }
+
+ spin_unlock_irqrestore(&slob_lock, flags);
-+report:
-+ pax_report_usercopy(ptr, n, to, type);
-+#endif
-+
++ return "<slob>";
+}
-+EXPORT_SYMBOL(check_object_size);
++#endif
+
/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
-@@ -547,10 +640,10 @@ size_t ksize(const void *block)
+@@ -547,10 +631,10 @@ size_t ksize(const void *block)
sp = slob_page(block);
if (is_slob_page(sp)) {
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
@@ -101607,11 +102063,11 @@ index 837ebd6..0bd23bc 100644
}
EXPORT_SYMBOL(ksize);
-@@ -566,8 +659,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+@@ -566,8 +650,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
{
struct kmem_cache *c;
-+#ifdef CONFIG_PAX_USERCOPY
++#ifdef CONFIG_PAX_USERCOPY_SLABS
+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
+#else
@@ -101621,11 +102077,11 @@ index 837ebd6..0bd23bc 100644
if (c) {
c->name = name;
-@@ -605,17 +703,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
+@@ -605,17 +694,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
void *b;
-+#ifdef CONFIG_PAX_USERCOPY
++#ifdef CONFIG_PAX_USERCOPY_SLABS
+ b = __kmalloc_node_align(c->size, flags, node, c->align);
+#else
if (c->size < PAGE_SIZE) {
@@ -101647,7 +102103,7 @@ index 837ebd6..0bd23bc 100644
if (c->ctor)
c->ctor(b);
-@@ -627,10 +733,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
+@@ -627,10 +724,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
static void __kmem_cache_free(void *b, int size)
{
@@ -101666,13 +102122,13 @@ index 837ebd6..0bd23bc 100644
}
static void kmem_rcu_free(struct rcu_head *head)
-@@ -643,18 +755,32 @@ static void kmem_rcu_free(struct rcu_head *head)
+@@ -643,18 +746,32 @@ static void kmem_rcu_free(struct rcu_head *head)
void kmem_cache_free(struct kmem_cache *c, void *b)
{
+ int size = c->size;
+
-+#ifdef CONFIG_PAX_USERCOPY
++#ifdef CONFIG_PAX_USERCOPY_SLABS
+ if (size + c->align < PAGE_SIZE) {
+ size += c->align;
+ b -= c->align;
@@ -101693,7 +102149,7 @@ index 837ebd6..0bd23bc 100644
+ __kmem_cache_free(b, size);
}
-+#ifdef CONFIG_PAX_USERCOPY
++#ifdef CONFIG_PAX_USERCOPY_SLABS
+ trace_kfree(_RET_IP_, b);
+#else
trace_kmem_cache_free(_RET_IP_, b);
@@ -101703,7 +102159,7 @@ index 837ebd6..0bd23bc 100644
EXPORT_SYMBOL(kmem_cache_free);
diff --git a/mm/slub.c b/mm/slub.c
-index 4996fc7..87e01d0 100644
+index 4996fc7..38850dd 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -201,7 +201,7 @@ struct track {
@@ -101776,58 +102232,89 @@ index 4996fc7..87e01d0 100644
/*
* This function is called with IRQs disabled during early-boot on
-@@ -2915,6 +2914,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
+@@ -2792,6 +2791,10 @@ out:
+ }
+ #endif
+
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++static struct kmem_cache kmalloc_caches_usercopy[SLUB_PAGE_SHIFT];
++#endif
++
+ /*
+ * Conversion table for small slabs sizes / 8 to the index in the
+ * kmalloc array. This is necessary for slabs < 192 since we have non power
+@@ -2847,6 +2850,13 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
+ return dma_kmalloc_cache(index, flags);
+
+ #endif
++
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++ if (flags & SLAB_USERCOPY)
++ return &kmalloc_caches_usercopy[index];
++
++#endif
++
+ return &kmalloc_caches[index];
+ }
+
+@@ -2915,6 +2925,56 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
EXPORT_SYMBOL(__kmalloc_node);
#endif
-+void check_object_size(const void *ptr, unsigned long n, bool to)
++bool is_usercopy_object(const void *ptr)
+{
++ struct page *page;
++ struct kmem_cache *s;
++
++ if (ZERO_OR_NULL_PTR(ptr))
++ return false;
++
++ if (!virt_addr_valid(ptr))
++ return false;
++
++ page = virt_to_head_page(ptr);
++
++ if (!PageSlab(page))
++ return false;
++
++ s = page->slab;
++ return s->flags & SLAB_USERCOPY;
++}
+
+#ifdef CONFIG_PAX_USERCOPY
++const char *check_heap_object(const void *ptr, unsigned long n, bool to)
++{
+ struct page *page;
-+ struct kmem_cache *s = NULL;
++ struct kmem_cache *s;
+ unsigned long offset;
-+ const char *type;
-+
-+ if (!n)
-+ return;
+
-+ type = "<null>";
+ if (ZERO_OR_NULL_PTR(ptr))
-+ goto report;
++ return "<null>";
+
+ if (!virt_addr_valid(ptr))
-+ return;
++ return NULL;
+
+ page = get_object_page(ptr);
+
-+ type = "<process stack>";
-+ if (!page) {
-+ if (object_is_on_stack(ptr, n) == -1)
-+ goto report;
-+ return;
-+ }
++ if (!page)
++ return NULL;
+
+ s = page->slab;
-+ type = s->name;
+ if (!(s->flags & SLAB_USERCOPY))
-+ goto report;
++ return s->name;
+
+ offset = (ptr - page_address(page)) % s->size;
+ if (offset <= s->objsize && n <= s->objsize - offset)
-+ return;
-+
-+report:
-+ pax_report_usercopy(ptr, n, to, type);
-+#endif
++ return NULL;
+
++ return s->name;
+}
-+EXPORT_SYMBOL(check_object_size);
++#endif
+
size_t ksize(const void *object)
{
struct page *page;
-@@ -3185,8 +3228,8 @@ void __init kmem_cache_init(void)
+@@ -3185,8 +3245,8 @@ void __init kmem_cache_init(void)
* kmem_cache_open for slab_state == DOWN.
*/
create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
@@ -101838,7 +102325,7 @@ index 4996fc7..87e01d0 100644
caches++;
hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
-@@ -3198,18 +3241,18 @@ void __init kmem_cache_init(void)
+@@ -3198,18 +3258,18 @@ void __init kmem_cache_init(void)
/* Caches that are not of the two-to-the-power-of size */
if (KMALLOC_MIN_SIZE <= 32) {
create_kmalloc_cache(&kmalloc_caches[1],
@@ -101860,7 +102347,28 @@ index 4996fc7..87e01d0 100644
caches++;
}
-@@ -3293,7 +3336,7 @@ static int slab_unmergeable(struct kmem_cache *s)
+@@ -3267,6 +3327,20 @@ void __init kmem_cache_init(void)
+ kmem_size = sizeof(struct kmem_cache);
+ #endif
+
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++ for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
++ struct kmem_cache *s = &kmalloc_caches[i];
++
++ if (s->size) {
++ char *name = kasprintf(GFP_NOWAIT, "kmalloc-usercopy-%d", s->objsize);
++
++ BUG_ON(!name);
++ create_kmalloc_cache(&kmalloc_caches_usercopy[i], name,
++ s->objsize, GFP_NOWAIT, SLAB_USERCOPY);
++ }
++ }
++#endif
++
+ printk(KERN_INFO
+ "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
+ " CPUs=%d, Nodes=%d\n",
+@@ -3293,7 +3367,7 @@ static int slab_unmergeable(struct kmem_cache *s)
/*
* We may have set a slab to be unmergeable during bootstrap.
*/
@@ -101869,7 +102377,7 @@ index 4996fc7..87e01d0 100644
return 1;
return 0;
-@@ -3353,7 +3396,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+@@ -3353,7 +3427,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
if (s) {
int cpu;
@@ -101878,7 +102386,7 @@ index 4996fc7..87e01d0 100644
/*
* Adjust the object sizes so that we clear
* the complete object on kzalloc.
-@@ -3372,7 +3415,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+@@ -3372,7 +3446,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
if (sysfs_slab_alias(s, name)) {
down_write(&slub_lock);
@@ -101887,7 +102395,7 @@ index 4996fc7..87e01d0 100644
up_write(&slub_lock);
goto err;
}
-@@ -4101,7 +4144,7 @@ SLAB_ATTR_RO(ctor);
+@@ -4101,7 +4175,7 @@ SLAB_ATTR_RO(ctor);
static ssize_t aliases_show(struct kmem_cache *s, char *buf)
{
@@ -101896,7 +102404,7 @@ index 4996fc7..87e01d0 100644
}
SLAB_ATTR_RO(aliases);
-@@ -4503,7 +4546,7 @@ static void kmem_cache_release(struct kobject *kobj)
+@@ -4503,7 +4577,7 @@ static void kmem_cache_release(struct kobject *kobj)
kfree(s);
}
@@ -101905,7 +102413,7 @@ index 4996fc7..87e01d0 100644
.show = slab_attr_show,
.store = slab_attr_store,
};
-@@ -4522,7 +4565,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
+@@ -4522,7 +4596,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
return 0;
}
@@ -101914,7 +102422,7 @@ index 4996fc7..87e01d0 100644
.filter = uevent_filter,
};
-@@ -4564,6 +4607,7 @@ static char *create_unique_id(struct kmem_cache *s)
+@@ -4564,6 +4638,7 @@ static char *create_unique_id(struct kmem_cache *s)
return name;
}
@@ -101922,7 +102430,7 @@ index 4996fc7..87e01d0 100644
static int sysfs_slab_add(struct kmem_cache *s)
{
int err;
-@@ -4619,6 +4663,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
+@@ -4619,6 +4694,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
kobject_del(&s->kobj);
kobject_put(&s->kobj);
}
@@ -101930,7 +102438,7 @@ index 4996fc7..87e01d0 100644
/*
* Need to buffer aliases during bootup until sysfs becomes
-@@ -4632,6 +4677,7 @@ struct saved_alias {
+@@ -4632,6 +4708,7 @@ struct saved_alias {
static struct saved_alias *alias_list;
@@ -101938,7 +102446,7 @@ index 4996fc7..87e01d0 100644
static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
struct saved_alias *al;
-@@ -4654,6 +4700,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
+@@ -4654,6 +4731,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
alias_list = al;
return 0;
}
@@ -101946,7 +102454,7 @@ index 4996fc7..87e01d0 100644
static int __init slab_sysfs_init(void)
{
-@@ -4785,7 +4832,13 @@ static const struct file_operations proc_slabinfo_operations = {
+@@ -4785,7 +4863,13 @@ static const struct file_operations proc_slabinfo_operations = {
static int __init slab_proc_init(void)
{
@@ -106452,7 +106960,7 @@ index 62a9025..65b82ad 100644
sprintf(alias, "dmi*");
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
-index 03efeab..f65608f 100644
+index 03efeab..35e35ff 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -764,7 +764,7 @@ static void check_section(const char *modname, struct elf_info *elf,
@@ -106503,12 +107011,12 @@ index 03efeab..f65608f 100644
"or drop the export.\n",
tosym, sec2annotation(tosec), sec2annotation(tosec), tosym);
+ case DATA_TO_TEXT:
-+/*
++#if 0
+ fprintf(stderr,
-+ "The variable %s references\n"
-+ "the %s %s%s%s\n",
-+ fromsym, to, sec2annotation(tosec), tosym, to_p);
-+*/
++ "The %s %s:%s references\n"
++ "the %s %s:%s%s\n",
++ from, fromsec, fromsym, to, tosec, tosym, to_p);
++#endif
+ break;
case NO_MISMATCH:
/* To get warnings on missing members */
@@ -106647,10 +107155,10 @@ index d52f7a0..b66cdd9 100755
rm -f tags
xtags ctags
diff --git a/security/Kconfig b/security/Kconfig
-index fb363cd..b6ce7c6 100644
+index fb363cd..6426142 100644
--- a/security/Kconfig
+++ b/security/Kconfig
-@@ -4,6 +4,855 @@
+@@ -4,6 +4,869 @@
menu "Security options"
@@ -106675,6 +107183,9 @@ index fb363cd..b6ce7c6 100644
+ bool
+ default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
+
++ config PAX_USERCOPY_SLABS
++ bool
++
+config GRKERNSEC
+ bool "Grsecurity"
+ select CRYPTO
@@ -106909,13 +107420,12 @@ index fb363cd..b6ce7c6 100644
+ has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
+ support.
+
-+ If you have applications not marked by the PT_PAX_FLAGS ELF program
-+ header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
-+ option otherwise they will not get any protection.
-+
+ Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
+ support as well, they will override the legacy EI_PAX marks.
+
++ If you enable none of the marking options then all applications
++ will run with PaX enabled on them by default.
++
+config PAX_PT_PAX_FLAGS
+ bool 'Use ELF program header marking'
+ default y if GRKERNSEC_CONFIG_AUTO
@@ -106928,15 +107438,14 @@ index fb363cd..b6ce7c6 100644
+ integrated into the toolchain (the binutils patch is available
+ from http://pax.grsecurity.net).
+
-+ If you have applications not marked by the PT_PAX_FLAGS ELF program
-+ header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
-+ support otherwise they will not get any protection.
++ Note that if you enable the legacy EI_PAX marking support as well,
++ the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
+
+ If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
+ must make sure that the marks are the same if a binary has both marks.
+
-+ Note that if you enable the legacy EI_PAX marking support as well,
-+ the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
++ If you enable none of the marking options then all applications
++ will run with PaX enabled on them by default.
+
+config PAX_XATTR_PAX_FLAGS
+ bool 'Use filesystem extended attributes marking'
@@ -106959,15 +107468,14 @@ index fb363cd..b6ce7c6 100644
+ isofs, squashfs, tmpfs, udf, vfat) so copying files through such
+ filesystems will lose the extended attributes and these PaX markings.
+
-+ If you have applications not marked by the PT_PAX_FLAGS ELF program
-+ header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
-+ support otherwise they will not get any protection.
++ Note that if you enable the legacy EI_PAX marking support as well,
++ the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
+
+ If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
+ must make sure that the marks are the same if a binary has both marks.
+
-+ Note that if you enable the legacy EI_PAX marking support as well,
-+ the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
++ If you enable none of the marking options then all applications
++ will run with PaX enabled on them by default.
+
+choice
+ prompt 'MAC system integration'
@@ -107457,6 +107965,7 @@ index fb363cd..b6ce7c6 100644
+ default y if GRKERNSEC_CONFIG_AUTO
+ depends on X86 || PPC || SPARC || ARM
+ depends on GRKERNSEC && (SLAB || SLUB || SLOB)
++ select PAX_USERCOPY_SLABS
+ help
+ By saying Y here the kernel will enforce the size of heap objects
+ when they are copied in either direction between the kernel and
@@ -107493,6 +108002,19 @@ index fb363cd..b6ce7c6 100644
+ Homepage:
+ http://www.grsecurity.net/~ephox/overflow_plugin/
+
++config PAX_LATENT_ENTROPY
++ bool "Generate some entropy during boot"
++ default y if GRKERNSEC_CONFIG_AUTO
++ help
++ By saying Y here the kernel will instrument early boot code to
++ extract some entropy from both original and artificially created
++ program state. This will help especially embedded systems where
++ there is little 'natural' source of entropy normally. The cost
++ is some slowdown of the boot process.
++
++ Note that entropy extracted this way is not cryptographically
++ secure!
++
+endmenu
+
+endmenu
@@ -107506,7 +108028,7 @@ index fb363cd..b6ce7c6 100644
config KEYS
bool "Enable access key retention support"
help
-@@ -146,7 +995,7 @@ config INTEL_TXT
+@@ -146,7 +1009,7 @@ config INTEL_TXT
config LSM_MMAP_MIN_ADDR
int "Low address space for LSM to protect from user allocation"
depends on SECURITY && SECURITY_SELINUX
@@ -108798,12 +109320,19 @@ index 79633ea..9732e90 100644
break;
}
}
+diff --git a/tools/gcc/.gitignore b/tools/gcc/.gitignore
+new file mode 100644
+index 0000000..50f2f2f
+--- /dev/null
++++ b/tools/gcc/.gitignore
+@@ -0,0 +1 @@
++size_overflow_hash.h
diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
new file mode 100644
-index 0000000..f4f9986
+index 0000000..1d09b7e
--- /dev/null
+++ b/tools/gcc/Makefile
-@@ -0,0 +1,41 @@
+@@ -0,0 +1,43 @@
+#CC := gcc
+#PLUGIN_SOURCE_FILES := pax_plugin.c
+#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
@@ -108825,6 +109354,7 @@ index 0000000..f4f9986
+$(HOSTLIBS)-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
+$(HOSTLIBS)-y += colorize_plugin.so
+$(HOSTLIBS)-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so
++$(HOSTLIBS)-$(CONFIG_PAX_LATENT_ENTROPY) += latent_entropy_plugin.so
+
+always := $($(HOSTLIBS)-y)
+
@@ -108835,6 +109365,7 @@ index 0000000..f4f9986
+checker_plugin-objs := checker_plugin.o
+colorize_plugin-objs := colorize_plugin.o
+size_overflow_plugin-objs := size_overflow_plugin.o
++latent_entropy_plugin-objs := latent_entropy_plugin.o
+
+$(obj)/size_overflow_plugin.o: $(objtree)/$(obj)/size_overflow_hash.h
+
@@ -109024,7 +109555,7 @@ index 0000000..d41b5af
+}
diff --git a/tools/gcc/colorize_plugin.c b/tools/gcc/colorize_plugin.c
new file mode 100644
-index 0000000..7a5e311
+index 0000000..846aeb0
--- /dev/null
+++ b/tools/gcc/colorize_plugin.c
@@ -0,0 +1,148 @@
@@ -109162,7 +109693,7 @@ index 0000000..7a5e311
+ struct register_pass_info colorize_rearm_pass_info = {
+ .pass = &pass_ipa_colorize_rearm.pass,
+ .reference_pass_name = "*free_lang_data",
-+ .ref_pass_instance_number = 0,
++ .ref_pass_instance_number = 1,
+ .pos_op = PASS_POS_INSERT_AFTER
+ };
+
@@ -109178,7 +109709,7 @@ index 0000000..7a5e311
+}
diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
new file mode 100644
-index 0000000..89b7f56
+index 0000000..048d4ff
--- /dev/null
+++ b/tools/gcc/constify_plugin.c
@@ -0,0 +1,328 @@
@@ -109484,7 +110015,7 @@ index 0000000..89b7f56
+ struct register_pass_info local_variable_pass_info = {
+ .pass = &pass_local_variable.pass,
+ .reference_pass_name = "*referenced_vars",
-+ .ref_pass_instance_number = 0,
++ .ref_pass_instance_number = 1,
+ .pos_op = PASS_POS_INSERT_AFTER
+ };
+
@@ -109612,7 +110143,7 @@ index 0000000..a0fe8b2
+exit 0
diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
new file mode 100644
-index 0000000..a5eabce
+index 0000000..a86e422
--- /dev/null
+++ b/tools/gcc/kallocstat_plugin.c
@@ -0,0 +1,167 @@
@@ -109769,7 +110300,7 @@ index 0000000..a5eabce
+ struct register_pass_info kallocstat_pass_info = {
+ .pass = &kallocstat_pass.pass,
+ .reference_pass_name = "ssa",
-+ .ref_pass_instance_number = 0,
++ .ref_pass_instance_number = 1,
+ .pos_op = PASS_POS_INSERT_AFTER
+ };
+
@@ -109785,7 +110316,7 @@ index 0000000..a5eabce
+}
diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
new file mode 100644
-index 0000000..d8a8da2
+index 0000000..98011fa
--- /dev/null
+++ b/tools/gcc/kernexec_plugin.c
@@ -0,0 +1,427 @@
@@ -110161,19 +110692,19 @@ index 0000000..d8a8da2
+ struct register_pass_info kernexec_reload_pass_info = {
+ .pass = &kernexec_reload_pass.pass,
+ .reference_pass_name = "ssa",
-+ .ref_pass_instance_number = 0,
++ .ref_pass_instance_number = 1,
+ .pos_op = PASS_POS_INSERT_AFTER
+ };
+ struct register_pass_info kernexec_fptr_pass_info = {
+ .pass = &kernexec_fptr_pass.pass,
+ .reference_pass_name = "ssa",
-+ .ref_pass_instance_number = 0,
++ .ref_pass_instance_number = 1,
+ .pos_op = PASS_POS_INSERT_AFTER
+ };
+ struct register_pass_info kernexec_retaddr_pass_info = {
+ .pass = &kernexec_retaddr_pass.pass,
+ .reference_pass_name = "pro_and_epilogue",
-+ .ref_pass_instance_number = 0,
++ .ref_pass_instance_number = 1,
+ .pos_op = PASS_POS_INSERT_AFTER
+ };
+
@@ -110216,6 +110747,307 @@ index 0000000..d8a8da2
+
+ return 0;
+}
+diff --git a/tools/gcc/latent_entropy_plugin.c b/tools/gcc/latent_entropy_plugin.c
+new file mode 100644
+index 0000000..b8008f7
+--- /dev/null
++++ b/tools/gcc/latent_entropy_plugin.c
+@@ -0,0 +1,295 @@
++/*
++ * Copyright 2012 by the PaX Team <pageexec@freemail.hu>
++ * Licensed under the GPL v2
++ *
++ * Note: the choice of the license means that the compilation process is
++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
++ * but for the kernel it doesn't matter since it doesn't link against
++ * any of the gcc libraries
++ *
++ * gcc plugin to help generate a little bit of entropy from program state,
++ * used during boot in the kernel
++ *
++ * TODO:
++ * - add ipa pass to identify not explicitly marked candidate functions
++ * - mix in more program state (function arguments/return values, loop variables, etc)
++ * - more instrumentation control via attribute parameters
++ *
++ * BUGS:
++ * - LTO needs -flto-partition=none for now
++ */
++#include "gcc-plugin.h"
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "flags.h"
++#include "intl.h"
++#include "toplev.h"
++#include "plugin.h"
++//#include "expr.h" where are you...
++#include "diagnostic.h"
++#include "plugin-version.h"
++#include "tm.h"
++#include "function.h"
++#include "basic-block.h"
++#include "gimple.h"
++#include "rtl.h"
++#include "emit-rtl.h"
++#include "tree-flow.h"
++
++int plugin_is_GPL_compatible;
++
++static tree latent_entropy_decl;
++
++static struct plugin_info latent_entropy_plugin_info = {
++ .version = "201207271820",
++ .help = NULL
++};
++
++static unsigned int execute_latent_entropy(void);
++static bool gate_latent_entropy(void);
++
++static struct gimple_opt_pass latent_entropy_pass = {
++ .pass = {
++ .type = GIMPLE_PASS,
++ .name = "latent_entropy",
++ .gate = gate_latent_entropy,
++ .execute = execute_latent_entropy,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++ .tv_id = TV_NONE,
++ .properties_required = PROP_gimple_leh | PROP_cfg,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
++ }
++};
++
++static tree handle_latent_entropy_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
++{
++ if (TREE_CODE(*node) != FUNCTION_DECL) {
++ *no_add_attrs = true;
++ error("%qE attribute only applies to functions", name);
++ }
++ return NULL_TREE;
++}
++
++static struct attribute_spec latent_entropy_attr = {
++ .name = "latent_entropy",
++ .min_length = 0,
++ .max_length = 0,
++ .decl_required = true,
++ .type_required = false,
++ .function_type_required = false,
++ .handler = handle_latent_entropy_attribute,
++#if BUILDING_GCC_VERSION >= 4007
++ .affects_type_identity = false
++#endif
++};
++
++static void register_attributes(void *event_data, void *data)
++{
++ register_attribute(&latent_entropy_attr);
++}
++
++static bool gate_latent_entropy(void)
++{
++ tree latent_entropy_attr;
++
++ latent_entropy_attr = lookup_attribute("latent_entropy", DECL_ATTRIBUTES(current_function_decl));
++ return latent_entropy_attr != NULL_TREE;
++}
++
++static unsigned HOST_WIDE_INT seed;
++static unsigned HOST_WIDE_INT get_random_const(void)
++{
++ seed = (seed >> 1U) ^ (-(seed & 1ULL) & 0xD800000000000000ULL);
++ return seed;
++}
++
++static enum tree_code get_op(tree *rhs)
++{
++ static enum tree_code op;
++ unsigned HOST_WIDE_INT random_const;
++
++ random_const = get_random_const();
++
++ switch (op) {
++ case BIT_XOR_EXPR:
++ op = PLUS_EXPR;
++ break;
++
++ case PLUS_EXPR:
++ if (rhs) {
++ op = LROTATE_EXPR;
++ random_const &= HOST_BITS_PER_WIDE_INT - 1;
++ break;
++ }
++
++ case LROTATE_EXPR:
++ default:
++ op = BIT_XOR_EXPR;
++ break;
++ }
++ if (rhs)
++ *rhs = build_int_cstu(unsigned_intDI_type_node, random_const);
++ return op;
++}
++
++static void perturb_local_entropy(basic_block bb, tree local_entropy)
++{
++ gimple_stmt_iterator gsi;
++ gimple assign;
++ tree addxorrol, rhs;
++ enum tree_code op;
++
++ op = get_op(&rhs);
++ addxorrol = fold_build2_loc(UNKNOWN_LOCATION, op, unsigned_intDI_type_node, local_entropy, rhs);
++ assign = gimple_build_assign(local_entropy, addxorrol);
++ find_referenced_vars_in(assign);
++//debug_bb(bb);
++ gsi = gsi_after_labels(bb);
++ gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
++ update_stmt(assign);
++}
++
++static void perturb_latent_entropy(basic_block bb, tree rhs)
++{
++ gimple_stmt_iterator gsi;
++ gimple assign;
++ tree addxorrol, temp;
++
++ // 1. create temporary copy of latent_entropy
++ temp = create_tmp_var(unsigned_intDI_type_node, "temp_latent_entropy");
++ add_referenced_var(temp);
++ mark_sym_for_renaming(temp);
++
++ // 2. read...
++ assign = gimple_build_assign(temp, latent_entropy_decl);
++ find_referenced_vars_in(assign);
++ gsi = gsi_after_labels(bb);
++ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
++ update_stmt(assign);
++
++ // 3. ...modify...
++ addxorrol = fold_build2_loc(UNKNOWN_LOCATION, get_op(NULL), unsigned_intDI_type_node, temp, rhs);
++ assign = gimple_build_assign(temp, addxorrol);
++ find_referenced_vars_in(assign);
++ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
++ update_stmt(assign);
++
++ // 4. ...write latent_entropy
++ assign = gimple_build_assign(latent_entropy_decl, temp);
++ find_referenced_vars_in(assign);
++ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
++ update_stmt(assign);
++}
++
++static unsigned int execute_latent_entropy(void)
++{
++ basic_block bb;
++ gimple assign;
++ gimple_stmt_iterator gsi;
++ tree local_entropy;
++
++ if (!latent_entropy_decl) {
++ struct varpool_node *node;
++
++ for (node = varpool_nodes; node; node = node->next) {
++ tree var = node->decl;
++ if (strcmp(IDENTIFIER_POINTER(DECL_NAME(var)), "latent_entropy"))
++ continue;
++ latent_entropy_decl = var;
++// debug_tree(var);
++ break;
++ }
++ if (!latent_entropy_decl) {
++// debug_tree(current_function_decl);
++ return 0;
++ }
++ }
++
++//fprintf(stderr, "latent_entropy: %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl)));
++
++ // 1. create local entropy variable
++ local_entropy = create_tmp_var(unsigned_intDI_type_node, "local_entropy");
++ add_referenced_var(local_entropy);
++ mark_sym_for_renaming(local_entropy);
++
++ // 2. initialize local entropy variable
++ bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
++ if (dom_info_available_p(CDI_DOMINATORS))
++ set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
++ gsi = gsi_start_bb(bb);
++
++ assign = gimple_build_assign(local_entropy, build_int_cstu(unsigned_intDI_type_node, get_random_const()));
++// gimple_set_location(assign, loc);
++ find_referenced_vars_in(assign);
++ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
++ update_stmt(assign);
++ bb = bb->next_bb;
++
++ // 3. instrument each BB with an operation on the local entropy variable
++ while (bb != EXIT_BLOCK_PTR) {
++ perturb_local_entropy(bb, local_entropy);
++ bb = bb->next_bb;
++ };
++
++ // 4. mix local entropy into the global entropy variable
++ perturb_latent_entropy(EXIT_BLOCK_PTR->prev_bb, local_entropy);
++ return 0;
++}
++
++static void start_unit_callback(void *gcc_data, void *user_data)
++{
++#if BUILDING_GCC_VERSION >= 4007
++ seed = get_random_seed(false);
++#else
++ sscanf(get_random_seed(false), "%" HOST_WIDE_INT_PRINT "x", &seed);
++ seed *= seed;
++#endif
++
++ if (in_lto_p)
++ return;
++
++ // extern u64 latent_entropy
++ latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, get_identifier("latent_entropy"), unsigned_intDI_type_node);
++
++ TREE_STATIC(latent_entropy_decl) = 1;
++ TREE_PUBLIC(latent_entropy_decl) = 1;
++ TREE_USED(latent_entropy_decl) = 1;
++ TREE_THIS_VOLATILE(latent_entropy_decl) = 1;
++ DECL_EXTERNAL(latent_entropy_decl) = 1;
++ DECL_ARTIFICIAL(latent_entropy_decl) = 0;
++ DECL_INITIAL(latent_entropy_decl) = NULL;
++// DECL_ASSEMBLER_NAME(latent_entropy_decl);
++// varpool_finalize_decl(latent_entropy_decl);
++// varpool_mark_needed_node(latent_entropy_decl);
++}
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++ const char * const plugin_name = plugin_info->base_name;
++ struct register_pass_info latent_entropy_pass_info = {
++ .pass = &latent_entropy_pass.pass,
++ .reference_pass_name = "optimized",
++ .ref_pass_instance_number = 1,
++ .pos_op = PASS_POS_INSERT_BEFORE
++ };
++
++ if (!plugin_default_version_check(version, &gcc_version)) {
++ error(G_("incompatible gcc/plugin versions"));
++ return 1;
++ }
++
++ register_callback(plugin_name, PLUGIN_INFO, NULL, &latent_entropy_plugin_info);
++ register_callback ("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &latent_entropy_pass_info);
++ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
++
++ return 0;
++}
diff --git a/tools/gcc/size_overflow_hash.data b/tools/gcc/size_overflow_hash.data
new file mode 100644
index 0000000..eb35e4a
@@ -113285,7 +114117,7 @@ index 0000000..cc96254
+}
diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
new file mode 100644
-index 0000000..b87ec9d
+index 0000000..38d2014
--- /dev/null
+++ b/tools/gcc/stackleak_plugin.c
@@ -0,0 +1,313 @@
@@ -113558,13 +114390,13 @@ index 0000000..b87ec9d
+ .pass = &stackleak_tree_instrument_pass.pass,
+// .reference_pass_name = "tree_profile",
+ .reference_pass_name = "optimized",
-+ .ref_pass_instance_number = 0,
++ .ref_pass_instance_number = 1,
+ .pos_op = PASS_POS_INSERT_BEFORE
+ };
+ struct register_pass_info stackleak_final_pass_info = {
+ .pass = &stackleak_final_rtl_opt_pass.pass,
+ .reference_pass_name = "final",
-+ .ref_pass_instance_number = 0,
++ .ref_pass_instance_number = 1,
+ .pos_op = PASS_POS_INSERT_BEFORE
+ };
+
diff --git a/2.6.32/4450_grsec-kconfig-default-gids.patch b/2.6.32/4450_grsec-kconfig-default-gids.patch
index 3bf6bd2..e7b920b 100644
--- a/2.6.32/4450_grsec-kconfig-default-gids.patch
+++ b/2.6.32/4450_grsec-kconfig-default-gids.patch
@@ -73,7 +73,7 @@ diff -Nuar a/grsecurity/Kconfig b/Kconfig
diff -Nuar a/security/Kconfig b/security/Kconfig
--- a/security/Kconfig 2012-07-01 12:51:41.000000000 -0400
+++ b/security/Kconfig 2012-07-01 13:00:23.000000000 -0400
-@@ -187,7 +187,7 @@
+@@ -190,7 +190,7 @@
config GRKERNSEC_PROC_GID
int "GID exempted from /proc restrictions"
diff --git a/3.2.23/0000_README b/3.2.24/0000_README
index 998a3bc..51bc4a5 100644
--- a/3.2.23/0000_README
+++ b/3.2.24/0000_README
@@ -10,7 +10,11 @@ Patch: 1022_linux-3.2.23.patch
From: http://www.kernel.org
Desc: Linux 3.2.23
-Patch: 4420_grsecurity-2.9.1-3.2.23-201207242236.patch
+Patch: 1023_linux-3.2.24.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.24
+
+Patch: 4420_grsecurity-2.9.1-3.2.24-201207281946.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.2.23/1021_linux-3.2.22.patch b/3.2.24/1021_linux-3.2.22.patch
index e6ad93a..e6ad93a 100644
--- a/3.2.23/1021_linux-3.2.22.patch
+++ b/3.2.24/1021_linux-3.2.22.patch
diff --git a/3.2.23/1022_linux-3.2.23.patch b/3.2.24/1022_linux-3.2.23.patch
index 3d796d0..3d796d0 100644
--- a/3.2.23/1022_linux-3.2.23.patch
+++ b/3.2.24/1022_linux-3.2.23.patch
diff --git a/3.2.24/1023_linux-3.2.24.patch b/3.2.24/1023_linux-3.2.24.patch
new file mode 100644
index 0000000..4692eb4
--- /dev/null
+++ b/3.2.24/1023_linux-3.2.24.patch
@@ -0,0 +1,4684 @@
+diff --git a/Makefile b/Makefile
+index 40d1e3b..80bb4fd 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 23
++SUBLEVEL = 24
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/plat-samsung/adc.c b/arch/arm/plat-samsung/adc.c
+index 33ecd0c..b1e05cc 100644
+--- a/arch/arm/plat-samsung/adc.c
++++ b/arch/arm/plat-samsung/adc.c
+@@ -157,11 +157,13 @@ int s3c_adc_start(struct s3c_adc_client *client,
+ return -EINVAL;
+ }
+
+- if (client->is_ts && adc->ts_pend)
+- return -EAGAIN;
+-
+ spin_lock_irqsave(&adc->lock, flags);
+
++ if (client->is_ts && adc->ts_pend) {
++ spin_unlock_irqrestore(&adc->lock, flags);
++ return -EAGAIN;
++ }
++
+ client->channel = channel;
+ client->nr_samples = nr_samples;
+
+diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
+index 97f8bf6..adda036 100644
+--- a/arch/mips/include/asm/thread_info.h
++++ b/arch/mips/include/asm/thread_info.h
+@@ -60,6 +60,8 @@ struct thread_info {
+ register struct thread_info *__current_thread_info __asm__("$28");
+ #define current_thread_info() __current_thread_info
+
++#endif /* !__ASSEMBLY__ */
++
+ /* thread information allocation */
+ #if defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_32BIT)
+ #define THREAD_SIZE_ORDER (1)
+@@ -97,8 +99,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
+
+ #define free_thread_info(info) kfree(info)
+
+-#endif /* !__ASSEMBLY__ */
+-
+ #define PREEMPT_ACTIVE 0x10000000
+
+ /*
+diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
+index a81176f..be281c6 100644
+--- a/arch/mips/kernel/vmlinux.lds.S
++++ b/arch/mips/kernel/vmlinux.lds.S
+@@ -1,5 +1,6 @@
+ #include <asm/asm-offsets.h>
+ #include <asm/page.h>
++#include <asm/thread_info.h>
+ #include <asm-generic/vmlinux.lds.h>
+
+ #undef mips
+@@ -73,7 +74,7 @@ SECTIONS
+ .data : { /* Data */
+ . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */
+
+- INIT_TASK_DATA(PAGE_SIZE)
++ INIT_TASK_DATA(THREAD_SIZE)
+ NOSAVE_DATA
+ CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
+ READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
+diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
+index 98b7c4b..fa3f921 100644
+--- a/arch/powerpc/include/asm/cputime.h
++++ b/arch/powerpc/include/asm/cputime.h
+@@ -126,11 +126,11 @@ static inline u64 cputime64_to_jiffies64(const cputime_t ct)
+ /*
+ * Convert cputime <-> microseconds
+ */
+-extern u64 __cputime_msec_factor;
++extern u64 __cputime_usec_factor;
+
+ static inline unsigned long cputime_to_usecs(const cputime_t ct)
+ {
+- return mulhdu(ct, __cputime_msec_factor) * USEC_PER_MSEC;
++ return mulhdu(ct, __cputime_usec_factor);
+ }
+
+ static inline cputime_t usecs_to_cputime(const unsigned long us)
+@@ -143,7 +143,7 @@ static inline cputime_t usecs_to_cputime(const unsigned long us)
+ sec = us / 1000000;
+ if (ct) {
+ ct *= tb_ticks_per_sec;
+- do_div(ct, 1000);
++ do_div(ct, 1000000);
+ }
+ if (sec)
+ ct += (cputime_t) sec * tb_ticks_per_sec;
+diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
+index 5db163c..ec8affe 100644
+--- a/arch/powerpc/kernel/time.c
++++ b/arch/powerpc/kernel/time.c
+@@ -168,13 +168,13 @@ EXPORT_SYMBOL_GPL(ppc_tb_freq);
+ #ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ /*
+ * Factors for converting from cputime_t (timebase ticks) to
+- * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
++ * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds).
+ * These are all stored as 0.64 fixed-point binary fractions.
+ */
+ u64 __cputime_jiffies_factor;
+ EXPORT_SYMBOL(__cputime_jiffies_factor);
+-u64 __cputime_msec_factor;
+-EXPORT_SYMBOL(__cputime_msec_factor);
++u64 __cputime_usec_factor;
++EXPORT_SYMBOL(__cputime_usec_factor);
+ u64 __cputime_sec_factor;
+ EXPORT_SYMBOL(__cputime_sec_factor);
+ u64 __cputime_clockt_factor;
+@@ -192,8 +192,8 @@ static void calc_cputime_factors(void)
+
+ div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
+ __cputime_jiffies_factor = res.result_low;
+- div128_by_32(1000, 0, tb_ticks_per_sec, &res);
+- __cputime_msec_factor = res.result_low;
++ div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
++ __cputime_usec_factor = res.result_low;
+ div128_by_32(1, 0, tb_ticks_per_sec, &res);
+ __cputime_sec_factor = res.result_low;
+ div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
+index 4558f0d..479d03c 100644
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -416,12 +416,14 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
+ return 0;
+ }
+
+- if (intsrc->source_irq == 0 && intsrc->global_irq == 2) {
++ if (intsrc->source_irq == 0) {
+ if (acpi_skip_timer_override) {
+- printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
++ printk(PREFIX "BIOS IRQ0 override ignored.\n");
+ return 0;
+ }
+- if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
++
++ if ((intsrc->global_irq == 2) && acpi_fix_pin2_polarity
++ && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
+ intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
+ printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
+ }
+@@ -1327,17 +1329,12 @@ static int __init dmi_disable_acpi(const struct dmi_system_id *d)
+ }
+
+ /*
+- * Force ignoring BIOS IRQ0 pin2 override
++ * Force ignoring BIOS IRQ0 override
+ */
+ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
+ {
+- /*
+- * The ati_ixp4x0_rev() early PCI quirk should have set
+- * the acpi_skip_timer_override flag already:
+- */
+ if (!acpi_skip_timer_override) {
+- WARN(1, KERN_ERR "ati_ixp4x0 quirk not complete.\n");
+- pr_notice("%s detected: Ignoring BIOS IRQ0 pin2 override\n",
++ pr_notice("%s detected: Ignoring BIOS IRQ0 override\n",
+ d->ident);
+ acpi_skip_timer_override = 1;
+ }
+@@ -1431,7 +1428,7 @@ static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
+ * is enabled. This input is incorrectly designated the
+ * ISA IRQ 0 via an interrupt source override even though
+ * it is wired to the output of the master 8259A and INTIN0
+- * is not connected at all. Force ignoring BIOS IRQ0 pin2
++ * is not connected at all. Force ignoring BIOS IRQ0
+ * override in that cases.
+ */
+ {
+@@ -1466,6 +1463,14 @@ static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"),
+ },
+ },
++ {
++ .callback = dmi_ignore_irq0_timer_override,
++ .ident = "FUJITSU SIEMENS",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"),
++ },
++ },
+ {}
+ };
+
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index 37a458b..e61f79c 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -460,6 +460,14 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
+ },
+ },
++ { /* Handle problems with rebooting on the Precision M6600. */
++ .callback = set_pci_reboot,
++ .ident = "Dell OptiPlex 990",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
++ },
++ },
+ { }
+ };
+
+diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
+index 688be8a..9e76a32 100644
+--- a/block/scsi_ioctl.c
++++ b/block/scsi_ioctl.c
+@@ -721,11 +721,14 @@ int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
+ break;
+ }
+
++ if (capable(CAP_SYS_RAWIO))
++ return 0;
++
+ /* In particular, rule out all resets and host-specific ioctls. */
+ printk_ratelimited(KERN_WARNING
+ "%s: sending ioctl %x to a partition!\n", current->comm, cmd);
+
+- return capable(CAP_SYS_RAWIO) ? 0 : -ENOTTY;
++ return -ENOTTY;
+ }
+ EXPORT_SYMBOL(scsi_verify_blk_ioctl);
+
+diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
+index c850de4..eff7222 100644
+--- a/drivers/acpi/processor_core.c
++++ b/drivers/acpi/processor_core.c
+@@ -189,10 +189,12 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
+ * Processor (CPU3, 0x03, 0x00000410, 0x06) {}
+ * }
+ *
+- * Ignores apic_id and always return 0 for CPU0's handle.
++ * Ignores apic_id and always returns 0 for the processor
++ * handle with acpi id 0 if nr_cpu_ids is 1.
++ * This should be the case if SMP tables are not found.
+ * Return -1 for other CPU's handle.
+ */
+- if (acpi_id == 0)
++ if (nr_cpu_ids <= 1 && acpi_id == 0)
+ return acpi_id;
+ else
+ return apic_id;
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index ca191ff..ed6bc52 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -702,8 +702,8 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
+ * can wake the system. _S0W may be valid, too.
+ */
+ if (acpi_target_sleep_state == ACPI_STATE_S0 ||
+- (device_may_wakeup(dev) &&
+- adev->wakeup.sleep_state <= acpi_target_sleep_state)) {
++ (device_may_wakeup(dev) && adev->wakeup.flags.valid &&
++ adev->wakeup.sleep_state >= acpi_target_sleep_state)) {
+ acpi_status status;
+
+ acpi_method[3] = 'W';
+diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
+index 9f66181..240a244 100644
+--- a/drivers/acpi/sysfs.c
++++ b/drivers/acpi/sysfs.c
+@@ -173,7 +173,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
+ {
+ int result = 0;
+
+- if (!strncmp(val, "enable", strlen("enable") - 1)) {
++ if (!strncmp(val, "enable", strlen("enable"))) {
+ result = acpi_debug_trace(trace_method_name, trace_debug_level,
+ trace_debug_layer, 0);
+ if (result)
+@@ -181,7 +181,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
+ goto exit;
+ }
+
+- if (!strncmp(val, "disable", strlen("disable") - 1)) {
++ if (!strncmp(val, "disable", strlen("disable"))) {
+ int name = 0;
+ result = acpi_debug_trace((char *)&name, trace_debug_level,
+ trace_debug_layer, 0);
+diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
+index 96198f3..a2da8f2 100644
+--- a/drivers/gpio/gpio-wm8994.c
++++ b/drivers/gpio/gpio-wm8994.c
+@@ -89,8 +89,11 @@ static int wm8994_gpio_direction_out(struct gpio_chip *chip,
+ struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
+ struct wm8994 *wm8994 = wm8994_gpio->wm8994;
+
++ if (value)
++ value = WM8994_GPN_LVL;
++
+ return wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset,
+- WM8994_GPN_DIR, 0);
++ WM8994_GPN_DIR | WM8994_GPN_LVL, value);
+ }
+
+ static void wm8994_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 6aa7716..cc75c4b 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -8043,8 +8043,8 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
+ I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+
+ if (intel_enable_rc6(dev_priv->dev))
+- rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
+- GEN6_RC_CTL_RC6_ENABLE;
++ rc6_mask = GEN6_RC_CTL_RC6_ENABLE |
++ ((IS_GEN7(dev_priv->dev)) ? GEN6_RC_CTL_RC6p_ENABLE : 0);
+
+ I915_WRITE(GEN6_RC_CONTROL,
+ rc6_mask |
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
+index 299d238..899c712 100644
+--- a/drivers/hid/hid-apple.c
++++ b/drivers/hid/hid-apple.c
+@@ -514,6 +514,12 @@ static const struct hid_device_id apple_devices[] = {
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
+ .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
++ .driver_data = APPLE_HAS_FN },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
++ .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
++ .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
+ .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index c27b402..95430a0 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1374,6 +1374,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
+@@ -1884,6 +1887,7 @@ static const struct hid_device_id hid_ignore_list[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MCT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
+@@ -1968,6 +1972,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+ { }
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index fba3fc4..7db934d 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -125,6 +125,9 @@
+ #define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI 0x024c
+ #define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO 0x024d
+ #define USB_DEVICE_ID_APPLE_WELLSPRING6_JIS 0x024e
++#define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI 0x0262
++#define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO 0x0263
++#define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS 0x0264
+ #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239
+ #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a
+ #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
+@@ -491,6 +494,9 @@
+ #define USB_DEVICE_ID_CRYSTALTOUCH 0x0006
+ #define USB_DEVICE_ID_CRYSTALTOUCH_DUAL 0x0007
+
++#define USB_VENDOR_ID_MADCATZ 0x0738
++#define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540
++
+ #define USB_VENDOR_ID_MCC 0x09db
+ #define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
+ #define USB_DEVICE_ID_MCC_PMD1208LS 0x007a
+diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
+index d912649..1ba7af2 100644
+--- a/drivers/hwmon/it87.c
++++ b/drivers/hwmon/it87.c
+@@ -2086,7 +2086,7 @@ static void __devinit it87_init_device(struct platform_device *pdev)
+
+ /* Start monitoring */
+ it87_write_value(data, IT87_REG_CONFIG,
+- (it87_read_value(data, IT87_REG_CONFIG) & 0x36)
++ (it87_read_value(data, IT87_REG_CONFIG) & 0x3e)
+ | (update_vbat ? 0x41 : 0x01));
+ }
+
+diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
+index 61c9cf1..1201a15 100644
+--- a/drivers/hwspinlock/hwspinlock_core.c
++++ b/drivers/hwspinlock/hwspinlock_core.c
+@@ -345,7 +345,7 @@ int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
+ spin_lock_init(&hwlock->lock);
+ hwlock->bank = bank;
+
+- ret = hwspin_lock_register_single(hwlock, i);
++ ret = hwspin_lock_register_single(hwlock, base_id + i);
+ if (ret)
+ goto reg_failed;
+ }
+@@ -354,7 +354,7 @@ int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
+
+ reg_failed:
+ while (--i >= 0)
+- hwspin_lock_unregister_single(i);
++ hwspin_lock_unregister_single(base_id + i);
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(hwspin_lock_register);
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index d728875..2189cbf 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -142,6 +142,7 @@ static const struct xpad_device {
+ { 0x0c12, 0x880a, "Pelican Eclipse PL-2023", 0, XTYPE_XBOX },
+ { 0x0c12, 0x8810, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
+ { 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", 0, XTYPE_XBOX },
++ { 0x0d2f, 0x0002, "Andamiro Pump It Up pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
+ { 0x0e4c, 0x1097, "Radica Gamester Controller", 0, XTYPE_XBOX },
+ { 0x0e4c, 0x2390, "Radica Games Jtech Controller", 0, XTYPE_XBOX },
+ { 0x0e6f, 0x0003, "Logic3 Freebird wireless Controller", 0, XTYPE_XBOX },
+@@ -164,6 +165,7 @@ static const struct xpad_device {
+ { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
++ { 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
+ { 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
+ };
+@@ -238,12 +240,14 @@ static struct usb_device_id xpad_table [] = {
+ XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */
+ XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */
+ XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */
++ { USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */
+ XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */
+ XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
+ XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
+ XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */
+ XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */
+- XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
++ XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
++ XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
+ { }
+ };
+
+diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
+index 5ec617e..ec58f48 100644
+--- a/drivers/input/mouse/bcm5974.c
++++ b/drivers/input/mouse/bcm5974.c
+@@ -79,6 +79,10 @@
+ #define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI 0x0252
+ #define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO 0x0253
+ #define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS 0x0254
++/* MacbookPro10,1 (unibody, June 2012) */
++#define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI 0x0262
++#define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO 0x0263
++#define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS 0x0264
+
+ #define BCM5974_DEVICE(prod) { \
+ .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
+@@ -128,6 +132,10 @@ static const struct usb_device_id bcm5974_table[] = {
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
++ /* MacbookPro10,1 */
++ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
++ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
++ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
+ /* Terminating entry */
+ {}
+ };
+@@ -354,6 +362,18 @@ static const struct bcm5974_config bcm5974_config_table[] = {
+ { DIM_X, DIM_X / SN_COORD, -4620, 5140 },
+ { DIM_Y, DIM_Y / SN_COORD, -150, 6600 }
+ },
++ {
++ USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI,
++ USB_DEVICE_ID_APPLE_WELLSPRING7_ISO,
++ USB_DEVICE_ID_APPLE_WELLSPRING7_JIS,
++ HAS_INTEGRATED_BUTTON,
++ 0x84, sizeof(struct bt_data),
++ 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
++ { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
++ { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
++ { DIM_X, DIM_X / SN_COORD, -4750, 5280 },
++ { DIM_Y, DIM_Y / SN_COORD, -150, 6730 }
++ },
+ {}
+ };
+
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index f1d5408..a1b8caa 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -59,6 +59,8 @@ static struct protection_domain *pt_domain;
+
+ static struct iommu_ops amd_iommu_ops;
+
++static struct dma_map_ops amd_iommu_dma_ops;
++
+ /*
+ * general struct to manage commands send to an IOMMU
+ */
+@@ -1878,6 +1880,11 @@ static int device_change_notifier(struct notifier_block *nb,
+ list_add_tail(&dma_domain->list, &iommu_pd_list);
+ spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
+
++ if (!iommu_pass_through)
++ dev->archdata.dma_ops = &amd_iommu_dma_ops;
++ else
++ dev->archdata.dma_ops = &nommu_dma_ops;
++
+ break;
+ case BUS_NOTIFY_DEL_DEVICE:
+
+diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
+index 6269eb0..ef2d493 100644
+--- a/drivers/iommu/amd_iommu_init.c
++++ b/drivers/iommu/amd_iommu_init.c
+@@ -1468,6 +1468,8 @@ static int __init amd_iommu_init(void)
+
+ register_syscore_ops(&amd_iommu_syscore_ops);
+
++ x86_platform.iommu_shutdown = disable_iommus;
++
+ if (iommu_pass_through)
+ goto out;
+
+@@ -1476,7 +1478,6 @@ static int __init amd_iommu_init(void)
+ else
+ printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");
+
+- x86_platform.iommu_shutdown = disable_iommus;
+ out:
+ return ret;
+
+diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
+index 9bfd057..dae2b7a 100644
+--- a/drivers/md/dm-raid1.c
++++ b/drivers/md/dm-raid1.c
+@@ -1080,6 +1080,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ ti->split_io = dm_rh_get_region_size(ms->rh);
+ ti->num_flush_requests = 1;
+ ti->num_discard_requests = 1;
++ ti->discard_zeroes_data_unsupported = 1;
+
+ ms->kmirrord_wq = alloc_workqueue("kmirrord",
+ WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
+@@ -1210,7 +1211,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
+ * We need to dec pending if this was a write.
+ */
+ if (rw == WRITE) {
+- if (!(bio->bi_rw & REQ_FLUSH))
++ if (!(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD)))
+ dm_rh_dec(ms->rh, map_context->ll);
+ return error;
+ }
+diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
+index 7771ed2..69732e0 100644
+--- a/drivers/md/dm-region-hash.c
++++ b/drivers/md/dm-region-hash.c
+@@ -404,6 +404,9 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
+ return;
+ }
+
++ if (bio->bi_rw & REQ_DISCARD)
++ return;
++
+ /* We must inform the log that the sync count has changed. */
+ log->type->set_region_sync(log, region, 0);
+
+@@ -524,7 +527,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
+ struct bio *bio;
+
+ for (bio = bios->head; bio; bio = bio->bi_next) {
+- if (bio->bi_rw & REQ_FLUSH)
++ if (bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))
+ continue;
+ rh_inc(rh, dm_rh_bio_to_region(rh, bio));
+ }
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 700ecae..d8646d7 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -3700,8 +3700,8 @@ array_state_show(struct mddev *mddev, char *page)
+ return sprintf(page, "%s\n", array_states[st]);
+ }
+
+-static int do_md_stop(struct mddev * mddev, int ro, int is_open);
+-static int md_set_readonly(struct mddev * mddev, int is_open);
++static int do_md_stop(struct mddev * mddev, int ro, struct block_device *bdev);
++static int md_set_readonly(struct mddev * mddev, struct block_device *bdev);
+ static int do_md_run(struct mddev * mddev);
+ static int restart_array(struct mddev *mddev);
+
+@@ -3717,14 +3717,14 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
+ /* stopping an active array */
+ if (atomic_read(&mddev->openers) > 0)
+ return -EBUSY;
+- err = do_md_stop(mddev, 0, 0);
++ err = do_md_stop(mddev, 0, NULL);
+ break;
+ case inactive:
+ /* stopping an active array */
+ if (mddev->pers) {
+ if (atomic_read(&mddev->openers) > 0)
+ return -EBUSY;
+- err = do_md_stop(mddev, 2, 0);
++ err = do_md_stop(mddev, 2, NULL);
+ } else
+ err = 0; /* already inactive */
+ break;
+@@ -3732,7 +3732,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
+ break; /* not supported yet */
+ case readonly:
+ if (mddev->pers)
+- err = md_set_readonly(mddev, 0);
++ err = md_set_readonly(mddev, NULL);
+ else {
+ mddev->ro = 1;
+ set_disk_ro(mddev->gendisk, 1);
+@@ -3742,7 +3742,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
+ case read_auto:
+ if (mddev->pers) {
+ if (mddev->ro == 0)
+- err = md_set_readonly(mddev, 0);
++ err = md_set_readonly(mddev, NULL);
+ else if (mddev->ro == 1)
+ err = restart_array(mddev);
+ if (err == 0) {
+@@ -5078,15 +5078,17 @@ void md_stop(struct mddev *mddev)
+ }
+ EXPORT_SYMBOL_GPL(md_stop);
+
+-static int md_set_readonly(struct mddev *mddev, int is_open)
++static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
+ {
+ int err = 0;
+ mutex_lock(&mddev->open_mutex);
+- if (atomic_read(&mddev->openers) > is_open) {
++ if (atomic_read(&mddev->openers) > !!bdev) {
+ printk("md: %s still in use.\n",mdname(mddev));
+ err = -EBUSY;
+ goto out;
+ }
++ if (bdev)
++ sync_blockdev(bdev);
+ if (mddev->pers) {
+ __md_stop_writes(mddev);
+
+@@ -5108,18 +5110,26 @@ out:
+ * 0 - completely stop and dis-assemble array
+ * 2 - stop but do not disassemble array
+ */
+-static int do_md_stop(struct mddev * mddev, int mode, int is_open)
++static int do_md_stop(struct mddev * mddev, int mode,
++ struct block_device *bdev)
+ {
+ struct gendisk *disk = mddev->gendisk;
+ struct md_rdev *rdev;
+
+ mutex_lock(&mddev->open_mutex);
+- if (atomic_read(&mddev->openers) > is_open ||
++ if (atomic_read(&mddev->openers) > !!bdev ||
+ mddev->sysfs_active) {
+ printk("md: %s still in use.\n",mdname(mddev));
+ mutex_unlock(&mddev->open_mutex);
+ return -EBUSY;
+ }
++ if (bdev)
++ /* It is possible IO was issued on some other
++ * open file which was closed before we took ->open_mutex.
++ * As that was not the last close __blkdev_put will not
++ * have called sync_blockdev, so we must.
++ */
++ sync_blockdev(bdev);
+
+ if (mddev->pers) {
+ if (mddev->ro)
+@@ -5193,7 +5203,7 @@ static void autorun_array(struct mddev *mddev)
+ err = do_md_run(mddev);
+ if (err) {
+ printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
+- do_md_stop(mddev, 0, 0);
++ do_md_stop(mddev, 0, NULL);
+ }
+ }
+
+@@ -6184,11 +6194,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
+ goto done_unlock;
+
+ case STOP_ARRAY:
+- err = do_md_stop(mddev, 0, 1);
++ err = do_md_stop(mddev, 0, bdev);
+ goto done_unlock;
+
+ case STOP_ARRAY_RO:
+- err = md_set_readonly(mddev, 1);
++ err = md_set_readonly(mddev, bdev);
+ goto done_unlock;
+
+ case BLKROSET:
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 7af60ec..2d97bf0 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1713,8 +1713,14 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
+
+ if (atomic_dec_and_test(&r1_bio->remaining)) {
+ /* if we're here, all write(s) have completed, so clean up */
+- md_done_sync(mddev, r1_bio->sectors, 1);
+- put_buf(r1_bio);
++ int s = r1_bio->sectors;
++ if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
++ test_bit(R1BIO_WriteError, &r1_bio->state))
++ reschedule_retry(r1_bio);
++ else {
++ put_buf(r1_bio);
++ md_done_sync(mddev, s, 1);
++ }
+ }
+ }
+
+@@ -2378,9 +2384,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
+ */
+ if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
+ atomic_set(&r1_bio->remaining, read_targets);
+- for (i=0; i<conf->raid_disks; i++) {
++ for (i = 0; i < conf->raid_disks && read_targets; i++) {
+ bio = r1_bio->bios[i];
+ if (bio->bi_end_io == end_sync_read) {
++ read_targets--;
+ md_sync_acct(bio->bi_bdev, nr_sectors);
+ generic_make_request(bio);
+ }
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 6ba4954..26ef63a 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -196,12 +196,14 @@ static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
+ BUG_ON(!list_empty(&sh->lru));
+ BUG_ON(atomic_read(&conf->active_stripes)==0);
+ if (test_bit(STRIPE_HANDLE, &sh->state)) {
+- if (test_bit(STRIPE_DELAYED, &sh->state))
++ if (test_bit(STRIPE_DELAYED, &sh->state) &&
++ !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+ list_add_tail(&sh->lru, &conf->delayed_list);
+ else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
+ sh->bm_seq - conf->seq_write > 0)
+ list_add_tail(&sh->lru, &conf->bitmap_list);
+ else {
++ clear_bit(STRIPE_DELAYED, &sh->state);
+ clear_bit(STRIPE_BIT_DELAY, &sh->state);
+ list_add_tail(&sh->lru, &conf->handle_list);
+ }
+diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
+index f732877..d5cda35 100644
+--- a/drivers/media/dvb/dvb-core/dvbdev.c
++++ b/drivers/media/dvb/dvb-core/dvbdev.c
+@@ -243,6 +243,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
+ if (minor == MAX_DVB_MINORS) {
+ kfree(dvbdevfops);
+ kfree(dvbdev);
++ up_write(&minor_rwsem);
+ mutex_unlock(&dvbdev_register_lock);
+ return -EINVAL;
+ }
+diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
+index 34c03be..83e8e1b 100644
+--- a/drivers/mtd/nand/nandsim.c
++++ b/drivers/mtd/nand/nandsim.c
+@@ -28,7 +28,7 @@
+ #include <linux/module.h>
+ #include <linux/moduleparam.h>
+ #include <linux/vmalloc.h>
+-#include <asm/div64.h>
++#include <linux/math64.h>
+ #include <linux/slab.h>
+ #include <linux/errno.h>
+ #include <linux/string.h>
+@@ -547,12 +547,6 @@ static char *get_partition_name(int i)
+ return kstrdup(buf, GFP_KERNEL);
+ }
+
+-static uint64_t divide(uint64_t n, uint32_t d)
+-{
+- do_div(n, d);
+- return n;
+-}
+-
+ /*
+ * Initialize the nandsim structure.
+ *
+@@ -581,7 +575,7 @@ static int init_nandsim(struct mtd_info *mtd)
+ ns->geom.oobsz = mtd->oobsize;
+ ns->geom.secsz = mtd->erasesize;
+ ns->geom.pgszoob = ns->geom.pgsz + ns->geom.oobsz;
+- ns->geom.pgnum = divide(ns->geom.totsz, ns->geom.pgsz);
++ ns->geom.pgnum = div_u64(ns->geom.totsz, ns->geom.pgsz);
+ ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
+ ns->geom.secshift = ffs(ns->geom.secsz) - 1;
+ ns->geom.pgshift = chip->page_shift;
+@@ -924,7 +918,7 @@ static int setup_wear_reporting(struct mtd_info *mtd)
+
+ if (!rptwear)
+ return 0;
+- wear_eb_count = divide(mtd->size, mtd->erasesize);
++ wear_eb_count = div_u64(mtd->size, mtd->erasesize);
+ mem = wear_eb_count * sizeof(unsigned long);
+ if (mem / sizeof(unsigned long) != wear_eb_count) {
+ NS_ERR("Too many erase blocks for wear reporting\n");
+diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
+index 3680aa2..2cf084e 100644
+--- a/drivers/net/bonding/bond_debugfs.c
++++ b/drivers/net/bonding/bond_debugfs.c
+@@ -6,7 +6,7 @@
+ #include "bonding.h"
+ #include "bond_alb.h"
+
+-#ifdef CONFIG_DEBUG_FS
++#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_NET_NS)
+
+ #include <linux/debugfs.h>
+ #include <linux/seq_file.h>
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 1a88e38..6c284d1 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3184,6 +3184,12 @@ static int bond_master_netdev_event(unsigned long event,
+ switch (event) {
+ case NETDEV_CHANGENAME:
+ return bond_event_changename(event_bond);
++ case NETDEV_UNREGISTER:
++ bond_remove_proc_entry(event_bond);
++ break;
++ case NETDEV_REGISTER:
++ bond_create_proc_entry(event_bond);
++ break;
+ default:
+ break;
+ }
+@@ -4391,8 +4397,6 @@ static void bond_uninit(struct net_device *bond_dev)
+
+ bond_work_cancel_all(bond);
+
+- bond_remove_proc_entry(bond);
+-
+ bond_debug_unregister(bond);
+
+ __hw_addr_flush(&bond->mc_list);
+@@ -4794,7 +4798,6 @@ static int bond_init(struct net_device *bond_dev)
+
+ bond_set_lockdep_class(bond_dev);
+
+- bond_create_proc_entry(bond);
+ list_add_tail(&bond->bond_list, &bn->dev_list);
+
+ bond_prepare_sysfs_group(bond);
+diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+index eccdcff..5ae7df7 100644
+--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
++++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+@@ -267,7 +267,6 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
+ dev_warn(&pdev->dev, "stop mac failed\n");
+ atl1c_set_aspm(hw, false);
+ netif_carrier_off(netdev);
+- netif_stop_queue(netdev);
+ atl1c_phy_reset(hw);
+ atl1c_phy_init(&adapter->hw);
+ } else {
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+index aec7212..8dda46a 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+@@ -723,21 +723,6 @@ struct bnx2x_fastpath {
+
+ #define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
+
+-#define BNX2X_IP_CSUM_ERR(cqe) \
+- (!((cqe)->fast_path_cqe.status_flags & \
+- ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
+- ((cqe)->fast_path_cqe.type_error_flags & \
+- ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
+-
+-#define BNX2X_L4_CSUM_ERR(cqe) \
+- (!((cqe)->fast_path_cqe.status_flags & \
+- ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
+- ((cqe)->fast_path_cqe.type_error_flags & \
+- ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
+-
+-#define BNX2X_RX_CSUM_OK(cqe) \
+- (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
+-
+ #define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \
+ (((le16_to_cpu(flags) & \
+ PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index 580b44e..2c1a5c0 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -220,7 +220,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
+
+ if ((netif_tx_queue_stopped(txq)) &&
+ (bp->state == BNX2X_STATE_OPEN) &&
+- (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
++ (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4))
+ netif_tx_wake_queue(txq);
+
+ __netif_tx_unlock(txq);
+@@ -551,6 +551,26 @@ static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
+ le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
+ }
+
++static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
++ struct bnx2x_fastpath *fp)
++{
++ /* Do nothing if no IP/L4 csum validation was done */
++
++ if (cqe->fast_path_cqe.status_flags &
++ (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
++ ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
++ return;
++
++ /* If both IP/L4 validation were done, check if an error was found. */
++
++ if (cqe->fast_path_cqe.type_error_flags &
++ (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
++ ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
++ fp->eth_q_stats.hw_csum_err++;
++ else
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++}
++
+ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
+ {
+ struct bnx2x *bp = fp->bp;
+@@ -746,13 +766,9 @@ reuse_rx:
+
+ skb_checksum_none_assert(skb);
+
+- if (bp->dev->features & NETIF_F_RXCSUM) {
++ if (bp->dev->features & NETIF_F_RXCSUM)
++ bnx2x_csum_validate(skb, cqe, fp);
+
+- if (likely(BNX2X_RX_CSUM_OK(cqe)))
+- skb->ip_summed = CHECKSUM_UNNECESSARY;
+- else
+- fp->eth_q_stats.hw_csum_err++;
+- }
+ }
+
+ skb_record_rx_queue(skb, fp->index);
+@@ -2238,8 +2254,6 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
+ /* we split the first BD into headers and data BDs
+ * to ease the pain of our fellow microcode engineers
+ * we use one mapping for both BDs
+- * So far this has only been observed to happen
+- * in Other Operating Systems(TM)
+ */
+ static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
+ struct bnx2x_fp_txdata *txdata,
+@@ -2890,7 +2904,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
+
+ txdata->tx_bd_prod += nbd;
+
+- if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
++ if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 4)) {
+ netif_tx_stop_queue(txq);
+
+ /* paired memory barrier is in bnx2x_tx_int(), we have to keep
+@@ -2899,7 +2913,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ smp_mb();
+
+ fp->eth_q_stats.driver_xoff++;
+- if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
++ if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4)
+ netif_tx_wake_queue(txq);
+ }
+ txdata->tx_pkt++;
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 2dcac28..6b258d9 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -14046,7 +14046,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
+ }
+ }
+
+- if (tg3_flag(tp, 5755_PLUS))
++ if (tg3_flag(tp, 5755_PLUS) ||
++ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ tg3_flag_set(tp, SHORT_DMA_BUG);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
+index e556fc3..3072d35 100644
+--- a/drivers/net/ethernet/intel/e1000e/82571.c
++++ b/drivers/net/ethernet/intel/e1000e/82571.c
+@@ -1571,6 +1571,9 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
+ ctrl = er32(CTRL);
+ status = er32(STATUS);
+ rxcw = er32(RXCW);
++ /* SYNCH bit and IV bit are sticky */
++ udelay(10);
++ rxcw = er32(RXCW);
+
+ if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
+
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index cc2565c..9e61d6b 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -4185,6 +4185,7 @@ out:
+ return rc;
+
+ err_out_msi_4:
++ netif_napi_del(&tp->napi);
+ rtl_disable_msi(pdev, tp);
+ iounmap(ioaddr);
+ err_out_free_res_3:
+@@ -4210,6 +4211,8 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
+
+ cancel_delayed_work_sync(&tp->task);
+
++ netif_napi_del(&tp->napi);
++
+ unregister_netdev(dev);
+
+ rtl_release_firmware(tp);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 72cd190..d4d2bc1 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -1174,6 +1174,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
+ priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
+ wmb();
+ priv->hw->desc->set_tx_owner(desc);
++ wmb();
+ }
+
+ /* Interrupt on completition only for the latest segment */
+@@ -1189,6 +1190,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
+
+ /* To avoid raise condition */
+ priv->hw->desc->set_tx_owner(first);
++ wmb();
+
+ priv->cur_tx++;
+
+@@ -1252,6 +1254,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
+ }
+ wmb();
+ priv->hw->desc->set_rx_owner(p + entry);
++ wmb();
+ }
+ }
+
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 1b7082d..26106c0 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -504,10 +504,11 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
+ if (copy > size) {
+ ++from;
+ --count;
+- }
++ offset = 0;
++ } else
++ offset += size;
+ copy -= size;
+ offset1 += size;
+- offset = 0;
+ }
+
+ if (len == offset1)
+@@ -517,24 +518,29 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
+ struct page *page[MAX_SKB_FRAGS];
+ int num_pages;
+ unsigned long base;
++ unsigned long truesize;
+
+- len = from->iov_len - offset1;
++ len = from->iov_len - offset;
+ if (!len) {
+- offset1 = 0;
++ offset = 0;
+ ++from;
+ continue;
+ }
+- base = (unsigned long)from->iov_base + offset1;
++ base = (unsigned long)from->iov_base + offset;
+ size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
++ if (i + size > MAX_SKB_FRAGS)
++ return -EMSGSIZE;
+ num_pages = get_user_pages_fast(base, size, 0, &page[i]);
+- if ((num_pages != size) ||
+- (num_pages > MAX_SKB_FRAGS - skb_shinfo(skb)->nr_frags))
+- /* put_page is in skb free */
++ if (num_pages != size) {
++ for (i = 0; i < num_pages; i++)
++ put_page(page[i]);
+ return -EFAULT;
++ }
++ truesize = size * PAGE_SIZE;
+ skb->data_len += len;
+ skb->len += len;
+- skb->truesize += len;
+- atomic_add(len, &skb->sk->sk_wmem_alloc);
++ skb->truesize += truesize;
++ atomic_add(truesize, &skb->sk->sk_wmem_alloc);
+ while (len) {
+ int off = base & ~PAGE_MASK;
+ int size = min_t(int, len, PAGE_SIZE - off);
+@@ -545,7 +551,7 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
+ len -= size;
+ i++;
+ }
+- offset1 = 0;
++ offset = 0;
+ ++from;
+ }
+ return 0;
+@@ -645,7 +651,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ int err;
+ struct virtio_net_hdr vnet_hdr = { 0 };
+ int vnet_hdr_len = 0;
+- int copylen;
++ int copylen = 0;
+ bool zerocopy = false;
+
+ if (q->flags & IFF_VNET_HDR) {
+@@ -674,15 +680,31 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ if (unlikely(len < ETH_HLEN))
+ goto err;
+
++ err = -EMSGSIZE;
++ if (unlikely(count > UIO_MAXIOV))
++ goto err;
++
+ if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY))
+ zerocopy = true;
+
+ if (zerocopy) {
++ /* Userspace may produce vectors with count greater than
++ * MAX_SKB_FRAGS, so we need to linearize parts of the skb
++ * to let the rest of data to be fit in the frags.
++ */
++ if (count > MAX_SKB_FRAGS) {
++ copylen = iov_length(iv, count - MAX_SKB_FRAGS);
++ if (copylen < vnet_hdr_len)
++ copylen = 0;
++ else
++ copylen -= vnet_hdr_len;
++ }
+ /* There are 256 bytes to be copied in skb, so there is enough
+ * room for skb expand head in case it is used.
+ * The rest buffer is mapped from userspace.
+ */
+- copylen = vnet_hdr.hdr_len;
++ if (copylen < vnet_hdr.hdr_len)
++ copylen = vnet_hdr.hdr_len;
+ if (!copylen)
+ copylen = GOODCOPY_LEN;
+ } else
+@@ -693,10 +715,9 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ if (!skb)
+ goto err;
+
+- if (zerocopy) {
++ if (zerocopy)
+ err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
+- skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+- } else
++ else
+ err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
+ len);
+ if (err)
+@@ -715,8 +736,10 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ rcu_read_lock_bh();
+ vlan = rcu_dereference_bh(q->vlan);
+ /* copy skb_ubuf_info for callback when skb has no error */
+- if (zerocopy)
++ if (zerocopy) {
+ skb_shinfo(skb)->destructor_arg = m->msg_control;
++ skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
++ }
+ if (vlan)
+ macvlan_start_xmit(skb, vlan->dev);
+ else
+diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
+index ad96164..00ed9c1 100644
+--- a/drivers/net/usb/ipheth.c
++++ b/drivers/net/usb/ipheth.c
+@@ -59,6 +59,7 @@
+ #define USB_PRODUCT_IPHONE_3G 0x1292
+ #define USB_PRODUCT_IPHONE_3GS 0x1294
+ #define USB_PRODUCT_IPHONE_4 0x1297
++#define USB_PRODUCT_IPAD 0x129a
+ #define USB_PRODUCT_IPHONE_4_VZW 0x129c
+ #define USB_PRODUCT_IPHONE_4S 0x12a0
+
+@@ -101,6 +102,10 @@ static struct usb_device_id ipheth_table[] = {
+ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+ IPHETH_USBINTF_PROTO) },
+ { USB_DEVICE_AND_INTERFACE_INFO(
++ USB_VENDOR_APPLE, USB_PRODUCT_IPAD,
++ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
++ IPHETH_USBINTF_PROTO) },
++ { USB_DEVICE_AND_INTERFACE_INFO(
+ USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW,
+ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+ IPHETH_USBINTF_PROTO) },
+diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
+index 833cbef..8a40ff9 100644
+--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
++++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
+@@ -900,8 +900,7 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
+ */
+ if (!(txs->status & TX_STATUS_AMPDU)
+ && (txs->status & TX_STATUS_INTERMEDIATE)) {
+- wiphy_err(wlc->wiphy, "%s: INTERMEDIATE but not AMPDU\n",
+- __func__);
++ BCMMSG(wlc->wiphy, "INTERMEDIATE but not AMPDU\n");
+ return false;
+ }
+
+diff --git a/drivers/net/wireless/ipw2x00/ipw.h b/drivers/net/wireless/ipw2x00/ipw.h
+new file mode 100644
+index 0000000..4007bf5
+--- /dev/null
++++ b/drivers/net/wireless/ipw2x00/ipw.h
+@@ -0,0 +1,23 @@
++/*
++ * Intel Pro/Wireless 2100, 2200BG, 2915ABG network connection driver
++ *
++ * Copyright 2012 Stanislav Yakovlev <stas.yakovlev@gmail.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef __IPW_H__
++#define __IPW_H__
++
++#include <linux/ieee80211.h>
++
++static const u32 ipw_cipher_suites[] = {
++ WLAN_CIPHER_SUITE_WEP40,
++ WLAN_CIPHER_SUITE_WEP104,
++ WLAN_CIPHER_SUITE_TKIP,
++ WLAN_CIPHER_SUITE_CCMP,
++};
++
++#endif
+diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
+index 127e9c6..10862d4 100644
+--- a/drivers/net/wireless/ipw2x00/ipw2100.c
++++ b/drivers/net/wireless/ipw2x00/ipw2100.c
+@@ -166,6 +166,7 @@ that only one external action is invoked at a time.
+ #include <net/lib80211.h>
+
+ #include "ipw2100.h"
++#include "ipw.h"
+
+ #define IPW2100_VERSION "git-1.2.2"
+
+@@ -1955,6 +1956,9 @@ static int ipw2100_wdev_init(struct net_device *dev)
+ wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
+ }
+
++ wdev->wiphy->cipher_suites = ipw_cipher_suites;
++ wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites);
++
+ set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
+ if (wiphy_register(wdev->wiphy)) {
+ ipw2100_down(priv);
+diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
+index 827889b..56bd370 100644
+--- a/drivers/net/wireless/ipw2x00/ipw2200.c
++++ b/drivers/net/wireless/ipw2x00/ipw2200.c
+@@ -34,6 +34,7 @@
+ #include <linux/slab.h>
+ #include <net/cfg80211-wext.h>
+ #include "ipw2200.h"
++#include "ipw.h"
+
+
+ #ifndef KBUILD_EXTMOD
+@@ -11535,6 +11536,9 @@ static int ipw_wdev_init(struct net_device *dev)
+ wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band;
+ }
+
++ wdev->wiphy->cipher_suites = ipw_cipher_suites;
++ wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites);
++
+ set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
+
+ /* With that information in place, we can now register the wiphy... */
+diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
+index a262c23..0116ca8 100644
+--- a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
++++ b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
+@@ -466,7 +466,7 @@ int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
+ return 0;
+ }
+
+- if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
++ if (priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_INVALID) {
+ IWL_WARN(priv, "Removing wrong key %d 0x%x\n",
+ keyconf->keyidx, key_flags);
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+@@ -483,7 +483,7 @@ int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
+ sizeof(struct iwl4965_keyinfo));
+ priv->stations[sta_id].sta.key.key_flags =
+ STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
+- priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
++ priv->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx;
+ priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
+index 2bd5659..1bb64c9 100644
+--- a/drivers/net/wireless/iwlegacy/iwl-core.c
++++ b/drivers/net/wireless/iwlegacy/iwl-core.c
+@@ -1884,14 +1884,12 @@ void iwl_legacy_bg_watchdog(unsigned long data)
+ return;
+
+ /* monitor and check for other stuck queues */
+- if (iwl_legacy_is_any_associated(priv)) {
+- for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
+- /* skip as we already checked the command queue */
+- if (cnt == priv->cmd_queue)
+- continue;
+- if (iwl_legacy_check_stuck_queue(priv, cnt))
+- return;
+- }
++ for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
++ /* skip as we already checked the command queue */
++ if (cnt == priv->cmd_queue)
++ continue;
++ if (iwl_legacy_check_stuck_queue(priv, cnt))
++ return;
+ }
+
+ mod_timer(&priv->watchdog, jiffies +
+diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
+index 1e31050..ba28807 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
++++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
+@@ -426,8 +426,8 @@ void rt2x00usb_kick_queue(struct data_queue *queue)
+ case QID_RX:
+ if (!rt2x00queue_full(queue))
+ rt2x00queue_for_each_entry(queue,
+- Q_INDEX_DONE,
+ Q_INDEX,
++ Q_INDEX_DONE,
+ NULL,
+ rt2x00usb_kick_rx_entry);
+ break;
+diff --git a/drivers/net/wireless/rtl818x/rtl8187/leds.c b/drivers/net/wireless/rtl818x/rtl8187/leds.c
+index 2e0de2f..c2d5b49 100644
+--- a/drivers/net/wireless/rtl818x/rtl8187/leds.c
++++ b/drivers/net/wireless/rtl818x/rtl8187/leds.c
+@@ -117,7 +117,7 @@ static void rtl8187_led_brightness_set(struct led_classdev *led_dev,
+ radio_on = true;
+ } else if (radio_on) {
+ radio_on = false;
+- cancel_delayed_work_sync(&priv->led_on);
++ cancel_delayed_work(&priv->led_on);
+ ieee80211_queue_delayed_work(hw, &priv->led_off, 0);
+ }
+ } else if (radio_on) {
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index 12d1e81..d024f83 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -742,6 +742,18 @@ static int pci_pm_suspend_noirq(struct device *dev)
+
+ pci_pm_set_unknown_state(pci_dev);
+
++ /*
++ * Some BIOSes from ASUS have a bug: If a USB EHCI host controller's
++ * PCI COMMAND register isn't 0, the BIOS assumes that the controller
++ * hasn't been quiesced and tries to turn it off. If the controller
++ * is already in D3, this can hang or cause memory corruption.
++ *
++ * Since the value of the COMMAND register doesn't matter once the
++ * device has been suspended, we can safely set it to 0 here.
++ */
++ if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
++ pci_write_config_word(pci_dev, PCI_COMMAND, 0);
++
+ return 0;
+ }
+
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index e5b75eb..6d4a531 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1689,11 +1689,6 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
+ if (target_state == PCI_POWER_ERROR)
+ return -EIO;
+
+- /* Some devices mustn't be in D3 during system sleep */
+- if (target_state == PCI_D3hot &&
+- (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP))
+- return 0;
+-
+ pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
+
+ error = pci_set_power_state(dev, target_state);
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 3c56fec..78fda9c 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -2940,32 +2940,6 @@ static void __devinit disable_igfx_irq(struct pci_dev *dev)
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
+
+-/*
+- * The Intel 6 Series/C200 Series chipset's EHCI controllers on many
+- * ASUS motherboards will cause memory corruption or a system crash
+- * if they are in D3 while the system is put into S3 sleep.
+- */
+-static void __devinit asus_ehci_no_d3(struct pci_dev *dev)
+-{
+- const char *sys_info;
+- static const char good_Asus_board[] = "P8Z68-V";
+-
+- if (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP)
+- return;
+- if (dev->subsystem_vendor != PCI_VENDOR_ID_ASUSTEK)
+- return;
+- sys_info = dmi_get_system_info(DMI_BOARD_NAME);
+- if (sys_info && memcmp(sys_info, good_Asus_board,
+- sizeof(good_Asus_board) - 1) == 0)
+- return;
+-
+- dev_info(&dev->dev, "broken D3 during system sleep on ASUS\n");
+- dev->dev_flags |= PCI_DEV_FLAGS_NO_D3_DURING_SLEEP;
+- device_set_wakeup_capable(&dev->dev, false);
+-}
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c26, asus_ehci_no_d3);
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c2d, asus_ehci_no_d3);
+-
+ static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
+ struct pci_fixup *end)
+ {
+diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
+index 809a3ae..b46ec11 100644
+--- a/drivers/platform/x86/intel_ips.c
++++ b/drivers/platform/x86/intel_ips.c
+@@ -72,6 +72,7 @@
+ #include <linux/string.h>
+ #include <linux/tick.h>
+ #include <linux/timer.h>
++#include <linux/dmi.h>
+ #include <drm/i915_drm.h>
+ #include <asm/msr.h>
+ #include <asm/processor.h>
+@@ -1505,6 +1506,24 @@ static DEFINE_PCI_DEVICE_TABLE(ips_id_table) = {
+
+ MODULE_DEVICE_TABLE(pci, ips_id_table);
+
++static int ips_blacklist_callback(const struct dmi_system_id *id)
++{
++ pr_info("Blacklisted intel_ips for %s\n", id->ident);
++ return 1;
++}
++
++static const struct dmi_system_id ips_blacklist[] = {
++ {
++ .callback = ips_blacklist_callback,
++ .ident = "HP ProBook",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook"),
++ },
++ },
++ { } /* terminating entry */
++};
++
+ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ {
+ u64 platform_info;
+@@ -1514,6 +1533,9 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ u16 htshi, trc, trc_required_mask;
+ u8 tse;
+
++ if (dmi_check_system(ips_blacklist))
++ return -ENODEV;
++
+ ips = kzalloc(sizeof(struct ips_driver), GFP_KERNEL);
+ if (!ips)
+ return -ENOMEM;
+diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
+index 09e26bf..af1e296 100644
+--- a/drivers/platform/x86/samsung-laptop.c
++++ b/drivers/platform/x86/samsung-laptop.c
+@@ -540,245 +540,34 @@ static DEVICE_ATTR(performance_level, S_IWUSR | S_IRUGO,
+ get_performance_level, set_performance_level);
+
+
+-static int __init dmi_check_cb(const struct dmi_system_id *id)
+-{
+- pr_info("found laptop model '%s'\n",
+- id->ident);
+- return 1;
+-}
+-
+ static struct dmi_system_id __initdata samsung_dmi_table[] = {
+ {
+- .ident = "N128",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "N128"),
+- DMI_MATCH(DMI_BOARD_NAME, "N128"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "N130",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "N130"),
+- DMI_MATCH(DMI_BOARD_NAME, "N130"),
++ DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */
+ },
+- .callback = dmi_check_cb,
+ },
+ {
+- .ident = "N510",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "N510"),
+- DMI_MATCH(DMI_BOARD_NAME, "N510"),
++ DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /* Laptop */
+ },
+- .callback = dmi_check_cb,
+ },
+ {
+- .ident = "X125",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "X125"),
+- DMI_MATCH(DMI_BOARD_NAME, "X125"),
++ DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
+ },
+- .callback = dmi_check_cb,
+ },
+ {
+- .ident = "X120/X170",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "X120/X170"),
+- DMI_MATCH(DMI_BOARD_NAME, "X120/X170"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "NC10",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "NC10"),
+- DMI_MATCH(DMI_BOARD_NAME, "NC10"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "NP-Q45",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "SQ45S70S"),
+- DMI_MATCH(DMI_BOARD_NAME, "SQ45S70S"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "X360",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "X360"),
+- DMI_MATCH(DMI_BOARD_NAME, "X360"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "R410 Plus",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "R410P"),
+- DMI_MATCH(DMI_BOARD_NAME, "R460"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "R518",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "R518"),
+- DMI_MATCH(DMI_BOARD_NAME, "R518"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "R519/R719",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "R519/R719"),
+- DMI_MATCH(DMI_BOARD_NAME, "R519/R719"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "N150/N210/N220",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"),
+- DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "N220",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "N220"),
+- DMI_MATCH(DMI_BOARD_NAME, "N220"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "N150/N210/N220/N230",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220/N230"),
+- DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220/N230"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "N150P/N210P/N220P",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "N150P/N210P/N220P"),
+- DMI_MATCH(DMI_BOARD_NAME, "N150P/N210P/N220P"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "R700",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "SR700"),
+- DMI_MATCH(DMI_BOARD_NAME, "SR700"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "R530/R730",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "R530/R730"),
+- DMI_MATCH(DMI_BOARD_NAME, "R530/R730"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "NF110/NF210/NF310",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "NF110/NF210/NF310"),
+- DMI_MATCH(DMI_BOARD_NAME, "NF110/NF210/NF310"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "N145P/N250P/N260P",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"),
+- DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "R70/R71",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "R70/R71"),
+- DMI_MATCH(DMI_BOARD_NAME, "R70/R71"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "P460",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "P460"),
+- DMI_MATCH(DMI_BOARD_NAME, "P460"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "R528/R728",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "R528/R728"),
+- DMI_MATCH(DMI_BOARD_NAME, "R528/R728"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "NC210/NC110",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "NC210/NC110"),
+- DMI_MATCH(DMI_BOARD_NAME, "NC210/NC110"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "X520",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "X520"),
+- DMI_MATCH(DMI_BOARD_NAME, "X520"),
++ DMI_MATCH(DMI_CHASSIS_TYPE, "14"), /* Sub-Notebook */
+ },
+- .callback = dmi_check_cb,
+ },
+ { },
+ };
+@@ -819,7 +608,8 @@ static int __init samsung_init(void)
+
+ f0000_segment = ioremap_nocache(0xf0000, 0xffff);
+ if (!f0000_segment) {
+- pr_err("Can't map the segment at 0xf0000\n");
++ if (debug || force)
++ pr_err("Can't map the segment at 0xf0000\n");
+ return -EINVAL;
+ }
+
+@@ -832,7 +622,8 @@ static int __init samsung_init(void)
+ }
+
+ if (loca == 0xffff) {
+- pr_err("This computer does not support SABI\n");
++ if (debug || force)
++ pr_err("This computer does not support SABI\n");
+ goto error_no_signature;
+ }
+
+diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
+index 39e41fb..5160354 100644
+--- a/drivers/rtc/rtc-mxc.c
++++ b/drivers/rtc/rtc-mxc.c
+@@ -191,10 +191,11 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
+ struct platform_device *pdev = dev_id;
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ void __iomem *ioaddr = pdata->ioaddr;
++ unsigned long flags;
+ u32 status;
+ u32 events = 0;
+
+- spin_lock_irq(&pdata->rtc->irq_lock);
++ spin_lock_irqsave(&pdata->rtc->irq_lock, flags);
+ status = readw(ioaddr + RTC_RTCISR) & readw(ioaddr + RTC_RTCIENR);
+ /* clear interrupt sources */
+ writew(status, ioaddr + RTC_RTCISR);
+@@ -217,7 +218,7 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
+ rtc_update_alarm(&pdev->dev, &pdata->g_rtc_alarm);
+
+ rtc_update_irq(pdata->rtc, 1, events);
+- spin_unlock_irq(&pdata->rtc->irq_lock);
++ spin_unlock_irqrestore(&pdata->rtc->irq_lock, flags);
+
+ return IRQ_HANDLED;
+ }
+diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
+index 532d212..393e7ce 100644
+--- a/drivers/scsi/aic94xx/aic94xx_task.c
++++ b/drivers/scsi/aic94xx/aic94xx_task.c
+@@ -201,7 +201,7 @@ static void asd_get_response_tasklet(struct asd_ascb *ascb,
+
+ if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
+ resp->frame_len = le16_to_cpu(*(__le16 *)(r+6));
+- memcpy(&resp->ending_fis[0], r+16, 24);
++ memcpy(&resp->ending_fis[0], r+16, ATA_RESP_FIS_SIZE);
+ ts->buf_valid_size = sizeof(*resp);
+ }
+ }
+diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
+index db9238f..4868fc9 100644
+--- a/drivers/scsi/libsas/sas_ata.c
++++ b/drivers/scsi/libsas/sas_ata.c
+@@ -112,12 +112,12 @@ static void sas_ata_task_done(struct sas_task *task)
+ if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
+ ((stat->stat == SAM_STAT_CHECK_CONDITION &&
+ dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
+- ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);
++ memcpy(dev->sata_dev.fis, resp->ending_fis, ATA_RESP_FIS_SIZE);
+
+ if (!link->sactive) {
+- qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
++ qc->err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
+ } else {
+- link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.tf.command);
++ link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
+ if (unlikely(link->eh_info.err_mask))
+ qc->flags |= ATA_QCFLAG_FAILED;
+ }
+@@ -138,8 +138,8 @@ static void sas_ata_task_done(struct sas_task *task)
+ qc->flags |= ATA_QCFLAG_FAILED;
+ }
+
+- dev->sata_dev.tf.feature = 0x04; /* status err */
+- dev->sata_dev.tf.command = ATA_ERR;
++ dev->sata_dev.fis[3] = 0x04; /* status err */
++ dev->sata_dev.fis[2] = ATA_ERR;
+ }
+ }
+
+@@ -252,7 +252,7 @@ static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
+ {
+ struct domain_device *dev = qc->ap->private_data;
+
+- memcpy(&qc->result_tf, &dev->sata_dev.tf, sizeof(qc->result_tf));
++ ata_tf_from_fis(dev->sata_dev.fis, &qc->result_tf);
+ return true;
+ }
+
+diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
+index 65ea65a..93b9406 100644
+--- a/drivers/target/target_core_cdb.c
++++ b/drivers/target/target_core_cdb.c
+@@ -1199,7 +1199,7 @@ int target_emulate_write_same(struct se_task *task)
+ if (num_blocks != 0)
+ range = num_blocks;
+ else
+- range = (dev->transport->get_blocks(dev) - lba);
++ range = (dev->transport->get_blocks(dev) - lba) + 1;
+
+ pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
+ (unsigned long long)lba, (unsigned long long)range);
+diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
+index b75bc92..9145141 100644
+--- a/drivers/target/target_core_pr.c
++++ b/drivers/target/target_core_pr.c
+@@ -2042,7 +2042,7 @@ static int __core_scsi3_write_aptpl_to_file(
+ if (IS_ERR(file) || !file || !file->f_dentry) {
+ pr_err("filp_open(%s) for APTPL metadata"
+ " failed\n", path);
+- return (PTR_ERR(file) < 0 ? PTR_ERR(file) : -ENOENT);
++ return IS_ERR(file) ? PTR_ERR(file) : -ENOENT;
+ }
+
+ iov[0].iov_base = &buf[0];
+@@ -3853,7 +3853,7 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
+ " SPC-2 reservation is held, returning"
+ " RESERVATION_CONFLICT\n");
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+- ret = EINVAL;
++ ret = -EINVAL;
+ goto out;
+ }
+
+@@ -3863,7 +3863,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
+ */
+ if (!cmd->se_sess) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+- return -EINVAL;
++ ret = -EINVAL;
++ goto out;
+ }
+
+ if (cmd->data_length < 24) {
+diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
+index d95cfe2..278819c 100644
+--- a/drivers/target/tcm_fc/tfc_cmd.c
++++ b/drivers/target/tcm_fc/tfc_cmd.c
+@@ -249,6 +249,8 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd)
+ {
+ struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
+
++ if (cmd->aborted)
++ return ~0;
+ return fc_seq_exch(cmd->seq)->rxid;
+ }
+
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index 19fb5fa..9aaed0d 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -473,6 +473,8 @@ retry:
+ goto retry;
+ }
+ if (!desc->reslength) { /* zero length read */
++ dev_dbg(&desc->intf->dev, "%s: zero length - clearing WDM_READ\n", __func__);
++ clear_bit(WDM_READ, &desc->flags);
+ spin_unlock_irq(&desc->iuspin);
+ goto retry;
+ }
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 52d27ed..175b6bb 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2039,12 +2039,16 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
+ static int hub_port_reset(struct usb_hub *hub, int port1,
+ struct usb_device *udev, unsigned int delay, bool warm);
+
+-/* Is a USB 3.0 port in the Inactive state? */
+-static bool hub_port_inactive(struct usb_hub *hub, u16 portstatus)
++/* Is a USB 3.0 port in the Inactive or Compliance Mode state?
++ * Port warm reset is required to recover
++ */
++static bool hub_port_warm_reset_required(struct usb_hub *hub, u16 portstatus)
+ {
+ return hub_is_superspeed(hub->hdev) &&
+- (portstatus & USB_PORT_STAT_LINK_STATE) ==
+- USB_SS_PORT_LS_SS_INACTIVE;
++ (((portstatus & USB_PORT_STAT_LINK_STATE) ==
++ USB_SS_PORT_LS_SS_INACTIVE) ||
++ ((portstatus & USB_PORT_STAT_LINK_STATE) ==
++ USB_SS_PORT_LS_COMP_MOD)) ;
+ }
+
+ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
+@@ -2080,7 +2084,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
+ *
+ * See https://bugzilla.kernel.org/show_bug.cgi?id=41752
+ */
+- if (hub_port_inactive(hub, portstatus)) {
++ if (hub_port_warm_reset_required(hub, portstatus)) {
+ int ret;
+
+ if ((portchange & USB_PORT_STAT_C_CONNECTION))
+@@ -3646,9 +3650,7 @@ static void hub_events(void)
+ /* Warm reset a USB3 protocol port if it's in
+ * SS.Inactive state.
+ */
+- if (hub_is_superspeed(hub->hdev) &&
+- (portstatus & USB_PORT_STAT_LINK_STATE)
+- == USB_SS_PORT_LS_SS_INACTIVE) {
++ if (hub_port_warm_reset_required(hub, portstatus)) {
+ dev_dbg(hub_dev, "warm reset port %d\n", i);
+ hub_port_reset(hub, i, NULL,
+ HUB_BH_RESET_TIME, true);
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index a8b2980..fd8a2c2 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -438,6 +438,42 @@ void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
+ }
+ }
+
++/* Updates Link Status for SuperSpeed port */
++static void xhci_hub_report_link_state(u32 *status, u32 status_reg)
++{
++ u32 pls = status_reg & PORT_PLS_MASK;
++
++	/* resume state is an xHCI internal state.
++ * Do not report it to usb core.
++ */
++ if (pls == XDEV_RESUME)
++ return;
++
++ /* When the CAS bit is set then warm reset
++ * should be performed on port
++ */
++ if (status_reg & PORT_CAS) {
++ /* The CAS bit can be set while the port is
++ * in any link state.
++ * Only roothubs have CAS bit, so we
++ * pretend to be in compliance mode
++ * unless we're already in compliance
++ * or the inactive state.
++ */
++ if (pls != USB_SS_PORT_LS_COMP_MOD &&
++ pls != USB_SS_PORT_LS_SS_INACTIVE) {
++ pls = USB_SS_PORT_LS_COMP_MOD;
++ }
++ /* Return also connection bit -
++ * hub state machine resets port
++ * when this bit is set.
++ */
++ pls |= USB_PORT_STAT_CONNECTION;
++ }
++ /* update status field */
++ *status |= pls;
++}
++
+ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength)
+ {
+@@ -579,13 +615,9 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ else
+ status |= USB_PORT_STAT_POWER;
+ }
+- /* Port Link State */
++	/* Update Port Link State for super speed ports */
+ if (hcd->speed == HCD_USB3) {
+- /* resume state is a xHCI internal state.
+- * Do not report it to usb core.
+- */
+- if ((temp & PORT_PLS_MASK) != XDEV_RESUME)
+- status |= (temp & PORT_PLS_MASK);
++ xhci_hub_report_link_state(&status, temp);
+ }
+ if (bus_state->port_c_suspend & (1 << wIndex))
+ status |= 1 << USB_PORT_FEAT_C_SUSPEND;
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 363b141..7a56805 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -341,7 +341,11 @@ struct xhci_op_regs {
+ #define PORT_PLC (1 << 22)
+ /* port configure error change - port failed to configure its link partner */
+ #define PORT_CEC (1 << 23)
+-/* bit 24 reserved */
++/* Cold Attach Status - xHC can set this bit to report device attached during
++ * Sx state. Warm port reset should be performed to clear this bit and move port
++ * to connected state.
++ */
++#define PORT_CAS (1 << 24)
+ /* wake on connect (enable) */
+ #define PORT_WKCONN_E (1 << 25)
+ /* wake on disconnect (enable) */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 21a4734..5971c95 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -496,6 +496,15 @@ static void option_instat_callback(struct urb *urb);
+
+ /* MediaTek products */
+ #define MEDIATEK_VENDOR_ID 0x0e8d
++#define MEDIATEK_PRODUCT_DC_1COM 0x00a0
++#define MEDIATEK_PRODUCT_DC_4COM 0x00a5
++#define MEDIATEK_PRODUCT_DC_5COM 0x00a4
++#define MEDIATEK_PRODUCT_7208_1COM 0x7101
++#define MEDIATEK_PRODUCT_7208_2COM 0x7102
++#define MEDIATEK_PRODUCT_FP_1COM 0x0003
++#define MEDIATEK_PRODUCT_FP_2COM 0x0023
++#define MEDIATEK_PRODUCT_FPDC_1COM 0x0043
++#define MEDIATEK_PRODUCT_FPDC_2COM 0x0033
+
+ /* Cellient products */
+ #define CELLIENT_VENDOR_ID 0x2692
+@@ -553,6 +562,10 @@ static const struct option_blacklist_info net_intf1_blacklist = {
+ .reserved = BIT(1),
+ };
+
++static const struct option_blacklist_info net_intf2_blacklist = {
++ .reserved = BIT(2),
++};
++
+ static const struct option_blacklist_info net_intf3_blacklist = {
+ .reserved = BIT(3),
+ };
+@@ -1093,6 +1106,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
+ 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
+@@ -1234,6 +1249,17 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a1, 0xff, 0x02, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x02, 0x01) }, /* MediaTek MT6276M modem & app port */
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_1COM, 0x0a, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_5COM, 0xff, 0x02, 0x01) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_5COM, 0xff, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM, 0xff, 0x02, 0x01) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM, 0xff, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7208_1COM, 0x02, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7208_2COM, 0x02, 0x02, 0x01) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_1COM, 0x0a, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_2COM, 0x0a, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_1COM, 0x0a, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_2COM, 0x0a, 0x00, 0x00) },
+ { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
+ { } /* Terminating entry */
+ };
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index c14c42b..ae66278 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -222,6 +222,8 @@ static int vhost_worker(void *data)
+ if (work) {
+ __set_current_state(TASK_RUNNING);
+ work->fn(work);
++ if (need_resched())
++ schedule();
+ } else
+ schedule();
+
+diff --git a/fs/buffer.c b/fs/buffer.c
+index c807931..4115eca 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -1087,6 +1087,9 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
+ static struct buffer_head *
+ __getblk_slow(struct block_device *bdev, sector_t block, int size)
+ {
++ int ret;
++ struct buffer_head *bh;
++
+ /* Size must be multiple of hard sectorsize */
+ if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
+ (size < 512 || size > PAGE_SIZE))) {
+@@ -1099,20 +1102,21 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
+ return NULL;
+ }
+
+- for (;;) {
+- struct buffer_head * bh;
+- int ret;
++retry:
++ bh = __find_get_block(bdev, block, size);
++ if (bh)
++ return bh;
+
++ ret = grow_buffers(bdev, block, size);
++ if (ret == 0) {
++ free_more_memory();
++ goto retry;
++ } else if (ret > 0) {
+ bh = __find_get_block(bdev, block, size);
+ if (bh)
+ return bh;
+-
+- ret = grow_buffers(bdev, block, size);
+- if (ret < 0)
+- return NULL;
+- if (ret == 0)
+- free_more_memory();
+ }
++ return NULL;
+ }
+
+ /*
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index b21670c..56c152d 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -2925,6 +2925,18 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
+ #define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024)
+ #define CIFS_DEFAULT_NON_POSIX_WSIZE (65536)
+
++/*
++ * On hosts with high memory, we can't currently support wsize/rsize that are
++ * larger than we can kmap at once. Cap the rsize/wsize at
++ * LAST_PKMAP * PAGE_SIZE. We'll never be able to fill a read or write request
++ * larger than that anyway.
++ */
++#ifdef CONFIG_HIGHMEM
++#define CIFS_KMAP_SIZE_LIMIT (LAST_PKMAP * PAGE_CACHE_SIZE)
++#else /* CONFIG_HIGHMEM */
++#define CIFS_KMAP_SIZE_LIMIT (1<<24)
++#endif /* CONFIG_HIGHMEM */
++
+ static unsigned int
+ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
+ {
+@@ -2955,6 +2967,9 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
+ wsize = min_t(unsigned int, wsize,
+ server->maxBuf - sizeof(WRITE_REQ) + 4);
+
++ /* limit to the amount that we can kmap at once */
++ wsize = min_t(unsigned int, wsize, CIFS_KMAP_SIZE_LIMIT);
++
+ /* hard limit of CIFS_MAX_WSIZE */
+ wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE);
+
+@@ -2996,6 +3011,9 @@ cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
+ if (!(server->capabilities & CAP_LARGE_READ_X))
+ rsize = min_t(unsigned int, CIFSMaxBufSize, rsize);
+
++ /* limit to the amount that we can kmap at once */
++ rsize = min_t(unsigned int, rsize, CIFS_KMAP_SIZE_LIMIT);
++
+ /* hard limit of CIFS_MAX_RSIZE */
+ rsize = min_t(unsigned int, rsize, CIFS_MAX_RSIZE);
+
+diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
+index db4a138..4c37ed4 100644
+--- a/fs/cifs/readdir.c
++++ b/fs/cifs/readdir.c
+@@ -86,9 +86,12 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
+
+ dentry = d_lookup(parent, name);
+ if (dentry) {
+- /* FIXME: check for inode number changes? */
+- if (dentry->d_inode != NULL)
++ inode = dentry->d_inode;
++ /* update inode in place if i_ino didn't change */
++ if (inode && CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) {
++ cifs_fattr_to_inode(inode, fattr);
+ return dentry;
++ }
+ d_drop(dentry);
+ dput(dentry);
+ }
+diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c
+index 69f994a..0dbe58a 100644
+--- a/fs/ecryptfs/kthread.c
++++ b/fs/ecryptfs/kthread.c
+@@ -149,7 +149,7 @@ int ecryptfs_privileged_open(struct file **lower_file,
+ (*lower_file) = dentry_open(lower_dentry, lower_mnt, flags, cred);
+ if (!IS_ERR(*lower_file))
+ goto out;
+- if (flags & O_RDONLY) {
++ if ((flags & O_ACCMODE) == O_RDONLY) {
+ rc = PTR_ERR((*lower_file));
+ goto out;
+ }
+diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
+index 0dc5a3d..de42310 100644
+--- a/fs/ecryptfs/miscdev.c
++++ b/fs/ecryptfs/miscdev.c
+@@ -49,7 +49,10 @@ ecryptfs_miscdev_poll(struct file *file, poll_table *pt)
+ mutex_lock(&ecryptfs_daemon_hash_mux);
+ /* TODO: Just use file->private_data? */
+ rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
+- BUG_ON(rc || !daemon);
++ if (rc || !daemon) {
++ mutex_unlock(&ecryptfs_daemon_hash_mux);
++ return -EINVAL;
++ }
+ mutex_lock(&daemon->mux);
+ mutex_unlock(&ecryptfs_daemon_hash_mux);
+ if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
+@@ -122,6 +125,7 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file)
+ goto out_unlock_daemon;
+ }
+ daemon->flags |= ECRYPTFS_DAEMON_MISCDEV_OPEN;
++ file->private_data = daemon;
+ atomic_inc(&ecryptfs_num_miscdev_opens);
+ out_unlock_daemon:
+ mutex_unlock(&daemon->mux);
+@@ -152,9 +156,9 @@ ecryptfs_miscdev_release(struct inode *inode, struct file *file)
+
+ mutex_lock(&ecryptfs_daemon_hash_mux);
+ rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
+- BUG_ON(rc || !daemon);
++ if (rc || !daemon)
++ daemon = file->private_data;
+ mutex_lock(&daemon->mux);
+- BUG_ON(daemon->pid != task_pid(current));
+ BUG_ON(!(daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN));
+ daemon->flags &= ~ECRYPTFS_DAEMON_MISCDEV_OPEN;
+ atomic_dec(&ecryptfs_num_miscdev_opens);
+@@ -191,31 +195,32 @@ int ecryptfs_send_miscdev(char *data, size_t data_size,
+ struct ecryptfs_msg_ctx *msg_ctx, u8 msg_type,
+ u16 msg_flags, struct ecryptfs_daemon *daemon)
+ {
+- int rc = 0;
++ struct ecryptfs_message *msg;
+
+- mutex_lock(&msg_ctx->mux);
+- msg_ctx->msg = kmalloc((sizeof(*msg_ctx->msg) + data_size),
+- GFP_KERNEL);
+- if (!msg_ctx->msg) {
+- rc = -ENOMEM;
++ msg = kmalloc((sizeof(*msg) + data_size), GFP_KERNEL);
++ if (!msg) {
+ printk(KERN_ERR "%s: Out of memory whilst attempting "
+ "to kmalloc(%zd, GFP_KERNEL)\n", __func__,
+- (sizeof(*msg_ctx->msg) + data_size));
+- goto out_unlock;
++ (sizeof(*msg) + data_size));
++ return -ENOMEM;
+ }
++
++ mutex_lock(&msg_ctx->mux);
++ msg_ctx->msg = msg;
+ msg_ctx->msg->index = msg_ctx->index;
+ msg_ctx->msg->data_len = data_size;
+ msg_ctx->type = msg_type;
+ memcpy(msg_ctx->msg->data, data, data_size);
+ msg_ctx->msg_size = (sizeof(*msg_ctx->msg) + data_size);
+- mutex_lock(&daemon->mux);
+ list_add_tail(&msg_ctx->daemon_out_list, &daemon->msg_ctx_out_queue);
++ mutex_unlock(&msg_ctx->mux);
++
++ mutex_lock(&daemon->mux);
+ daemon->num_queued_msg_ctx++;
+ wake_up_interruptible(&daemon->wait);
+ mutex_unlock(&daemon->mux);
+-out_unlock:
+- mutex_unlock(&msg_ctx->mux);
+- return rc;
++
++ return 0;
+ }
+
+ /**
+@@ -246,8 +251,16 @@ ecryptfs_miscdev_read(struct file *file, char __user *buf, size_t count,
+ mutex_lock(&ecryptfs_daemon_hash_mux);
+ /* TODO: Just use file->private_data? */
+ rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
+- BUG_ON(rc || !daemon);
++ if (rc || !daemon) {
++ mutex_unlock(&ecryptfs_daemon_hash_mux);
++ return -EINVAL;
++ }
+ mutex_lock(&daemon->mux);
++ if (task_pid(current) != daemon->pid) {
++ mutex_unlock(&daemon->mux);
++ mutex_unlock(&ecryptfs_daemon_hash_mux);
++ return -EPERM;
++ }
+ if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
+ rc = 0;
+ mutex_unlock(&ecryptfs_daemon_hash_mux);
+@@ -284,9 +297,6 @@ check_list:
+ * message from the queue; try again */
+ goto check_list;
+ }
+- BUG_ON(euid != daemon->euid);
+- BUG_ON(current_user_ns() != daemon->user_ns);
+- BUG_ON(task_pid(current) != daemon->pid);
+ msg_ctx = list_first_entry(&daemon->msg_ctx_out_queue,
+ struct ecryptfs_msg_ctx, daemon_out_list);
+ BUG_ON(!msg_ctx);
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index 4d9d3a4..a6f3763 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -1629,8 +1629,10 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
+ if (op == EPOLL_CTL_ADD) {
+ if (is_file_epoll(tfile)) {
+ error = -ELOOP;
+- if (ep_loop_check(ep, tfile) != 0)
++ if (ep_loop_check(ep, tfile) != 0) {
++ clear_tfile_check_list();
+ goto error_tgt_fput;
++ }
+ } else
+ list_add(&tfile->f_tfile_llink, &tfile_check_list);
+ }
+diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
+index 49cf230..24a49d4 100644
+--- a/fs/exofs/ore.c
++++ b/fs/exofs/ore.c
+@@ -735,13 +735,7 @@ static int _prepare_for_striping(struct ore_io_state *ios)
+ out:
+ ios->numdevs = devs_in_group;
+ ios->pages_consumed = cur_pg;
+- if (unlikely(ret)) {
+- if (length == ios->length)
+- return ret;
+- else
+- ios->length -= length;
+- }
+- return 0;
++ return ret;
+ }
+
+ int ore_create(struct ore_io_state *ios)
+diff --git a/fs/exofs/ore_raid.c b/fs/exofs/ore_raid.c
+index d222c77..fff2070 100644
+--- a/fs/exofs/ore_raid.c
++++ b/fs/exofs/ore_raid.c
+@@ -461,16 +461,12 @@ static void _mark_read4write_pages_uptodate(struct ore_io_state *ios, int ret)
+ * ios->sp2d[p][*], xor is calculated the same way. These pages are
+ * allocated/freed and don't go through cache
+ */
+-static int _read_4_write(struct ore_io_state *ios)
++static int _read_4_write_first_stripe(struct ore_io_state *ios)
+ {
+- struct ore_io_state *ios_read;
+ struct ore_striping_info read_si;
+ struct __stripe_pages_2d *sp2d = ios->sp2d;
+ u64 offset = ios->si.first_stripe_start;
+- u64 last_stripe_end;
+- unsigned bytes_in_stripe = ios->si.bytes_in_stripe;
+- unsigned i, c, p, min_p = sp2d->pages_in_unit, max_p = -1;
+- int ret;
++ unsigned c, p, min_p = sp2d->pages_in_unit, max_p = -1;
+
+ if (offset == ios->offset) /* Go to start collect $200 */
+ goto read_last_stripe;
+@@ -478,6 +474,9 @@ static int _read_4_write(struct ore_io_state *ios)
+ min_p = _sp2d_min_pg(sp2d);
+ max_p = _sp2d_max_pg(sp2d);
+
++ ORE_DBGMSG("stripe_start=0x%llx ios->offset=0x%llx min_p=%d max_p=%d\n",
++ offset, ios->offset, min_p, max_p);
++
+ for (c = 0; ; c++) {
+ ore_calc_stripe_info(ios->layout, offset, 0, &read_si);
+ read_si.obj_offset += min_p * PAGE_SIZE;
+@@ -512,6 +511,18 @@ static int _read_4_write(struct ore_io_state *ios)
+ }
+
+ read_last_stripe:
++ return 0;
++}
++
++static int _read_4_write_last_stripe(struct ore_io_state *ios)
++{
++ struct ore_striping_info read_si;
++ struct __stripe_pages_2d *sp2d = ios->sp2d;
++ u64 offset;
++ u64 last_stripe_end;
++ unsigned bytes_in_stripe = ios->si.bytes_in_stripe;
++ unsigned c, p, min_p = sp2d->pages_in_unit, max_p = -1;
++
+ offset = ios->offset + ios->length;
+ if (offset % PAGE_SIZE)
+ _add_to_r4w_last_page(ios, &offset);
+@@ -527,15 +538,15 @@ read_last_stripe:
+ c = _dev_order(ios->layout->group_width * ios->layout->mirrors_p1,
+ ios->layout->mirrors_p1, read_si.par_dev, read_si.dev);
+
+- BUG_ON(ios->si.first_stripe_start + bytes_in_stripe != last_stripe_end);
+- /* unaligned IO must be within a single stripe */
+-
+ if (min_p == sp2d->pages_in_unit) {
+ /* Didn't do it yet */
+ min_p = _sp2d_min_pg(sp2d);
+ max_p = _sp2d_max_pg(sp2d);
+ }
+
++ ORE_DBGMSG("offset=0x%llx stripe_end=0x%llx min_p=%d max_p=%d\n",
++ offset, last_stripe_end, min_p, max_p);
++
+ while (offset < last_stripe_end) {
+ struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
+
+@@ -568,6 +579,15 @@ read_last_stripe:
+ }
+
+ read_it:
++ return 0;
++}
++
++static int _read_4_write_execute(struct ore_io_state *ios)
++{
++ struct ore_io_state *ios_read;
++ unsigned i;
++ int ret;
++
+ ios_read = ios->ios_read_4_write;
+ if (!ios_read)
+ return 0;
+@@ -591,6 +611,8 @@ read_it:
+ }
+
+ _mark_read4write_pages_uptodate(ios_read, ret);
++ ore_put_io_state(ios_read);
++ ios->ios_read_4_write = NULL; /* Might need a reuse at last stripe */
+ return 0;
+ }
+
+@@ -626,8 +648,11 @@ int _ore_add_parity_unit(struct ore_io_state *ios,
+ /* If first stripe, Read in all read4write pages
+ * (if needed) before we calculate the first parity.
+ */
+- _read_4_write(ios);
++ _read_4_write_first_stripe(ios);
+ }
++ if (!cur_len) /* If last stripe r4w pages of last stripe */
++ _read_4_write_last_stripe(ios);
++ _read_4_write_execute(ios);
+
+ for (i = 0; i < num_pages; i++) {
+ pages[i] = _raid_page_alloc();
+@@ -654,34 +679,14 @@ int _ore_add_parity_unit(struct ore_io_state *ios,
+
+ int _ore_post_alloc_raid_stuff(struct ore_io_state *ios)
+ {
+- struct ore_layout *layout = ios->layout;
+-
+ if (ios->parity_pages) {
++ struct ore_layout *layout = ios->layout;
+ unsigned pages_in_unit = layout->stripe_unit / PAGE_SIZE;
+- unsigned stripe_size = ios->si.bytes_in_stripe;
+- u64 last_stripe, first_stripe;
+
+ if (_sp2d_alloc(pages_in_unit, layout->group_width,
+ layout->parity, &ios->sp2d)) {
+ return -ENOMEM;
+ }
+-
+- /* Round io down to last full strip */
+- first_stripe = div_u64(ios->offset, stripe_size);
+- last_stripe = div_u64(ios->offset + ios->length, stripe_size);
+-
+- /* If an IO spans more then a single stripe it must end at
+- * a stripe boundary. The reminder at the end is pushed into the
+- * next IO.
+- */
+- if (last_stripe != first_stripe) {
+- ios->length = last_stripe * stripe_size - ios->offset;
+-
+- BUG_ON(!ios->length);
+- ios->nr_pages = (ios->length + PAGE_SIZE - 1) /
+- PAGE_SIZE;
+- ios->si.length = ios->length; /*make it consistent */
+- }
+ }
+ return 0;
+ }
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index ab7aa3f..a93486e 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1097,7 +1097,7 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
+ }
+ if (sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME) {
+ seq_printf(seq, ",max_batch_time=%u",
+- (unsigned) sbi->s_min_batch_time);
++ (unsigned) sbi->s_max_batch_time);
+ }
+
+ /*
+diff --git a/fs/fifo.c b/fs/fifo.c
+index b1a524d..cf6f434 100644
+--- a/fs/fifo.c
++++ b/fs/fifo.c
+@@ -14,7 +14,7 @@
+ #include <linux/sched.h>
+ #include <linux/pipe_fs_i.h>
+
+-static void wait_for_partner(struct inode* inode, unsigned int *cnt)
++static int wait_for_partner(struct inode* inode, unsigned int *cnt)
+ {
+ int cur = *cnt;
+
+@@ -23,6 +23,7 @@ static void wait_for_partner(struct inode* inode, unsigned int *cnt)
+ if (signal_pending(current))
+ break;
+ }
++ return cur == *cnt ? -ERESTARTSYS : 0;
+ }
+
+ static void wake_up_partner(struct inode* inode)
+@@ -67,8 +68,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
+ * seen a writer */
+ filp->f_version = pipe->w_counter;
+ } else {
+- wait_for_partner(inode, &pipe->w_counter);
+- if(signal_pending(current))
++ if (wait_for_partner(inode, &pipe->w_counter))
+ goto err_rd;
+ }
+ }
+@@ -90,8 +90,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
+ wake_up_partner(inode);
+
+ if (!pipe->readers) {
+- wait_for_partner(inode, &pipe->r_counter);
+- if (signal_pending(current))
++ if (wait_for_partner(inode, &pipe->r_counter))
+ goto err_wr;
+ }
+ break;
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index 2d0ca24..ebc2f4d 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -592,9 +592,15 @@ static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ spin_lock(&sbinfo->stat_lock);
+ /* If no limits set, just report 0 for max/free/used
+ * blocks, like simple_statfs() */
+- if (sbinfo->max_blocks >= 0) {
+- buf->f_blocks = sbinfo->max_blocks;
+- buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
++ if (sbinfo->spool) {
++ long free_pages;
++
++ spin_lock(&sbinfo->spool->lock);
++ buf->f_blocks = sbinfo->spool->max_hpages;
++ free_pages = sbinfo->spool->max_hpages
++ - sbinfo->spool->used_hpages;
++ buf->f_bavail = buf->f_bfree = free_pages;
++ spin_unlock(&sbinfo->spool->lock);
+ buf->f_files = sbinfo->max_inodes;
+ buf->f_ffree = sbinfo->free_inodes;
+ }
+@@ -610,6 +616,10 @@ static void hugetlbfs_put_super(struct super_block *sb)
+
+ if (sbi) {
+ sb->s_fs_info = NULL;
++
++ if (sbi->spool)
++ hugepage_put_subpool(sbi->spool);
++
+ kfree(sbi);
+ }
+ }
+@@ -841,10 +851,14 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
+ sb->s_fs_info = sbinfo;
+ sbinfo->hstate = config.hstate;
+ spin_lock_init(&sbinfo->stat_lock);
+- sbinfo->max_blocks = config.nr_blocks;
+- sbinfo->free_blocks = config.nr_blocks;
+ sbinfo->max_inodes = config.nr_inodes;
+ sbinfo->free_inodes = config.nr_inodes;
++ sbinfo->spool = NULL;
++ if (config.nr_blocks != -1) {
++ sbinfo->spool = hugepage_new_subpool(config.nr_blocks);
++ if (!sbinfo->spool)
++ goto out_free;
++ }
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+ sb->s_blocksize = huge_page_size(config.hstate);
+ sb->s_blocksize_bits = huge_page_shift(config.hstate);
+@@ -864,38 +878,12 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
+ sb->s_root = root;
+ return 0;
+ out_free:
++ if (sbinfo->spool)
++ kfree(sbinfo->spool);
+ kfree(sbinfo);
+ return -ENOMEM;
+ }
+
+-int hugetlb_get_quota(struct address_space *mapping, long delta)
+-{
+- int ret = 0;
+- struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb);
+-
+- if (sbinfo->free_blocks > -1) {
+- spin_lock(&sbinfo->stat_lock);
+- if (sbinfo->free_blocks - delta >= 0)
+- sbinfo->free_blocks -= delta;
+- else
+- ret = -ENOMEM;
+- spin_unlock(&sbinfo->stat_lock);
+- }
+-
+- return ret;
+-}
+-
+-void hugetlb_put_quota(struct address_space *mapping, long delta)
+-{
+- struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb);
+-
+- if (sbinfo->free_blocks > -1) {
+- spin_lock(&sbinfo->stat_lock);
+- sbinfo->free_blocks += delta;
+- spin_unlock(&sbinfo->stat_lock);
+- }
+-}
+-
+ static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+ {
+diff --git a/fs/locks.c b/fs/locks.c
+index 0d68f1f..6a64f15 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -1465,7 +1465,7 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
+ case F_WRLCK:
+ return generic_add_lease(filp, arg, flp);
+ default:
+- BUG();
++ return -EINVAL;
+ }
+ }
+ EXPORT_SYMBOL(generic_setlease);
+diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
+index 47d1c6f..b122af8 100644
+--- a/fs/nfs/idmap.c
++++ b/fs/nfs/idmap.c
+@@ -318,12 +318,12 @@ struct idmap_hashent {
+ unsigned long ih_expires;
+ __u32 ih_id;
+ size_t ih_namelen;
+- char ih_name[IDMAP_NAMESZ];
++ const char *ih_name;
+ };
+
+ struct idmap_hashtable {
+ __u8 h_type;
+- struct idmap_hashent h_entries[IDMAP_HASH_SZ];
++ struct idmap_hashent *h_entries;
+ };
+
+ struct idmap {
+@@ -378,6 +378,28 @@ nfs_idmap_new(struct nfs_client *clp)
+ return 0;
+ }
+
++static void
++idmap_alloc_hashtable(struct idmap_hashtable *h)
++{
++ if (h->h_entries != NULL)
++ return;
++ h->h_entries = kcalloc(IDMAP_HASH_SZ,
++ sizeof(*h->h_entries),
++ GFP_KERNEL);
++}
++
++static void
++idmap_free_hashtable(struct idmap_hashtable *h)
++{
++ int i;
++
++ if (h->h_entries == NULL)
++ return;
++ for (i = 0; i < IDMAP_HASH_SZ; i++)
++ kfree(h->h_entries[i].ih_name);
++ kfree(h->h_entries);
++}
++
+ void
+ nfs_idmap_delete(struct nfs_client *clp)
+ {
+@@ -387,6 +409,8 @@ nfs_idmap_delete(struct nfs_client *clp)
+ return;
+ rpc_unlink(idmap->idmap_dentry);
+ clp->cl_idmap = NULL;
++ idmap_free_hashtable(&idmap->idmap_user_hash);
++ idmap_free_hashtable(&idmap->idmap_group_hash);
+ kfree(idmap);
+ }
+
+@@ -396,6 +420,8 @@ nfs_idmap_delete(struct nfs_client *clp)
+ static inline struct idmap_hashent *
+ idmap_name_hash(struct idmap_hashtable* h, const char *name, size_t len)
+ {
++ if (h->h_entries == NULL)
++ return NULL;
+ return &h->h_entries[fnvhash32(name, len) % IDMAP_HASH_SZ];
+ }
+
+@@ -404,6 +430,8 @@ idmap_lookup_name(struct idmap_hashtable *h, const char *name, size_t len)
+ {
+ struct idmap_hashent *he = idmap_name_hash(h, name, len);
+
++ if (he == NULL)
++ return NULL;
+ if (he->ih_namelen != len || memcmp(he->ih_name, name, len) != 0)
+ return NULL;
+ if (time_after(jiffies, he->ih_expires))
+@@ -414,6 +442,8 @@ idmap_lookup_name(struct idmap_hashtable *h, const char *name, size_t len)
+ static inline struct idmap_hashent *
+ idmap_id_hash(struct idmap_hashtable* h, __u32 id)
+ {
++ if (h->h_entries == NULL)
++ return NULL;
+ return &h->h_entries[fnvhash32(&id, sizeof(id)) % IDMAP_HASH_SZ];
+ }
+
+@@ -421,6 +451,9 @@ static struct idmap_hashent *
+ idmap_lookup_id(struct idmap_hashtable *h, __u32 id)
+ {
+ struct idmap_hashent *he = idmap_id_hash(h, id);
++
++ if (he == NULL)
++ return NULL;
+ if (he->ih_id != id || he->ih_namelen == 0)
+ return NULL;
+ if (time_after(jiffies, he->ih_expires))
+@@ -436,12 +469,14 @@ idmap_lookup_id(struct idmap_hashtable *h, __u32 id)
+ static inline struct idmap_hashent *
+ idmap_alloc_name(struct idmap_hashtable *h, char *name, size_t len)
+ {
++ idmap_alloc_hashtable(h);
+ return idmap_name_hash(h, name, len);
+ }
+
+ static inline struct idmap_hashent *
+ idmap_alloc_id(struct idmap_hashtable *h, __u32 id)
+ {
++ idmap_alloc_hashtable(h);
+ return idmap_id_hash(h, id);
+ }
+
+@@ -449,9 +484,14 @@ static void
+ idmap_update_entry(struct idmap_hashent *he, const char *name,
+ size_t namelen, __u32 id)
+ {
++ char *str = kmalloc(namelen + 1, GFP_KERNEL);
++ if (str == NULL)
++ return;
++ kfree(he->ih_name);
+ he->ih_id = id;
+- memcpy(he->ih_name, name, namelen);
+- he->ih_name[namelen] = '\0';
++ memcpy(str, name, namelen);
++ str[namelen] = '\0';
++ he->ih_name = str;
+ he->ih_namelen = namelen;
+ he->ih_expires = jiffies + nfs_idmap_cache_timeout;
+ }
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 66020ac..07354b7 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1186,8 +1186,9 @@ restart:
+ spin_lock(&state->state_lock);
+ list_for_each_entry(lock, &state->lock_states, ls_locks) {
+ if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
+- printk("%s: Lock reclaim failed!\n",
+- __func__);
++ pr_warn_ratelimited("NFS: "
++ "%s: Lock reclaim "
++ "failed!\n", __func__);
+ }
+ spin_unlock(&state->state_lock);
+ nfs4_put_open_state(state);
+diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
+index 55d0128..a03ee52 100644
+--- a/fs/nfs/objlayout/objio_osd.c
++++ b/fs/nfs/objlayout/objio_osd.c
+@@ -433,7 +433,10 @@ int objio_read_pagelist(struct nfs_read_data *rdata)
+ objios->ios->done = _read_done;
+ dprintk("%s: offset=0x%llx length=0x%x\n", __func__,
+ rdata->args.offset, rdata->args.count);
+- return ore_read(objios->ios);
++ ret = ore_read(objios->ios);
++ if (unlikely(ret))
++ objio_free_result(&objios->oir);
++ return ret;
+ }
+
+ /*
+@@ -464,8 +467,16 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
+ struct objio_state *objios = priv;
+ struct nfs_write_data *wdata = objios->oir.rpcdata;
+ pgoff_t index = offset / PAGE_SIZE;
+- struct page *page = find_get_page(wdata->inode->i_mapping, index);
++ struct page *page;
++ loff_t i_size = i_size_read(wdata->inode);
++
++ if (offset >= i_size) {
++ *uptodate = true;
++ dprintk("%s: g_zero_page index=0x%lx\n", __func__, index);
++ return ZERO_PAGE(0);
++ }
+
++ page = find_get_page(wdata->inode->i_mapping, index);
+ if (!page) {
+ page = find_or_create_page(wdata->inode->i_mapping,
+ index, GFP_NOFS);
+@@ -486,8 +497,10 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
+
+ static void __r4w_put_page(void *priv, struct page *page)
+ {
+- dprintk("%s: index=0x%lx\n", __func__, page->index);
+- page_cache_release(page);
++ dprintk("%s: index=0x%lx\n", __func__,
++ (page == ZERO_PAGE(0)) ? -1UL : page->index);
++ if (ZERO_PAGE(0) != page)
++ page_cache_release(page);
+ return;
+ }
+
+@@ -517,8 +530,10 @@ int objio_write_pagelist(struct nfs_write_data *wdata, int how)
+ dprintk("%s: offset=0x%llx length=0x%x\n", __func__,
+ wdata->args.offset, wdata->args.count);
+ ret = ore_write(objios->ios);
+- if (unlikely(ret))
++ if (unlikely(ret)) {
++ objio_free_result(&objios->oir);
+ return ret;
++ }
+
+ if (objios->sync)
+ _write_done(objios->ios, objios);
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index 07ee5b4..1c7d45e 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -1950,7 +1950,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
+ if (ret < 0)
+ mlog_errno(ret);
+
+- if (file->f_flags & O_SYNC)
++ if (file && (file->f_flags & O_SYNC))
+ handle->h_sync = 1;
+
+ ocfs2_commit_trans(osb, handle);
+diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
+index fbb0b47..d5378d0 100644
+--- a/fs/ramfs/file-nommu.c
++++ b/fs/ramfs/file-nommu.c
+@@ -110,6 +110,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
+
+ /* prevent the page from being discarded on memory pressure */
+ SetPageDirty(page);
++ SetPageUptodate(page);
+
+ unlock_page(page);
+ put_page(page);
+diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c
+index 6094c5a..b73ecd8 100644
+--- a/fs/ubifs/sb.c
++++ b/fs/ubifs/sb.c
+@@ -715,8 +715,12 @@ static int fixup_free_space(struct ubifs_info *c)
+ lnum = ubifs_next_log_lnum(c, lnum);
+ }
+
+- /* Fixup the current log head */
+- err = fixup_leb(c, c->lhead_lnum, c->lhead_offs);
++ /*
++ * Fixup the log head which contains the only a CS node at the
++ * beginning.
++ */
++ err = fixup_leb(c, c->lhead_lnum,
++ ALIGN(UBIFS_CS_NODE_SZ, c->min_io_size));
+ if (err)
+ goto out;
+
+diff --git a/include/linux/Kbuild b/include/linux/Kbuild
+index bd21ecd..a3ce901 100644
+--- a/include/linux/Kbuild
++++ b/include/linux/Kbuild
+@@ -268,6 +268,7 @@ header-y += netfilter_ipv4.h
+ header-y += netfilter_ipv6.h
+ header-y += netlink.h
+ header-y += netrom.h
++header-y += nfc.h
+ header-y += nfs.h
+ header-y += nfs2.h
+ header-y += nfs3.h
+diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
+index fd0dc30..cc07d27 100644
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -165,6 +165,7 @@ enum hrtimer_base_type {
+ * @lock: lock protecting the base and associated clock bases
+ * and timers
+ * @active_bases: Bitfield to mark bases with active timers
++ * @clock_was_set: Indicates that clock was set from irq context.
+ * @expires_next: absolute time of the next event which was scheduled
+ * via clock_set_next_event()
+ * @hres_active: State of high resolution mode
+@@ -177,7 +178,8 @@ enum hrtimer_base_type {
+ */
+ struct hrtimer_cpu_base {
+ raw_spinlock_t lock;
+- unsigned long active_bases;
++ unsigned int active_bases;
++ unsigned int clock_was_set;
+ #ifdef CONFIG_HIGH_RES_TIMERS
+ ktime_t expires_next;
+ int hres_active;
+@@ -286,6 +288,8 @@ extern void hrtimer_peek_ahead_timers(void);
+ # define MONOTONIC_RES_NSEC HIGH_RES_NSEC
+ # define KTIME_MONOTONIC_RES KTIME_HIGH_RES
+
++extern void clock_was_set_delayed(void);
++
+ #else
+
+ # define MONOTONIC_RES_NSEC LOW_RES_NSEC
+@@ -306,6 +310,9 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
+ {
+ return 0;
+ }
++
++static inline void clock_was_set_delayed(void) { }
++
+ #endif
+
+ extern void clock_was_set(void);
+@@ -320,6 +327,7 @@ extern ktime_t ktime_get(void);
+ extern ktime_t ktime_get_real(void);
+ extern ktime_t ktime_get_boottime(void);
+ extern ktime_t ktime_get_monotonic_offset(void);
++extern ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot);
+
+ DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
+
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index d9d6c86..c5ed2f1 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -14,6 +14,15 @@ struct user_struct;
+ #include <linux/shm.h>
+ #include <asm/tlbflush.h>
+
++struct hugepage_subpool {
++ spinlock_t lock;
++ long count;
++ long max_hpages, used_hpages;
++};
++
++struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
++void hugepage_put_subpool(struct hugepage_subpool *spool);
++
+ int PageHuge(struct page *page);
+
+ void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
+@@ -138,12 +147,11 @@ struct hugetlbfs_config {
+ };
+
+ struct hugetlbfs_sb_info {
+- long max_blocks; /* blocks allowed */
+- long free_blocks; /* blocks free */
+ long max_inodes; /* inodes allowed */
+ long free_inodes; /* inodes free */
+ spinlock_t stat_lock;
+ struct hstate *hstate;
++ struct hugepage_subpool *spool;
+ };
+
+
+@@ -166,8 +174,6 @@ extern const struct file_operations hugetlbfs_file_operations;
+ extern const struct vm_operations_struct hugetlb_vm_ops;
+ struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
+ struct user_struct **user, int creat_flags);
+-int hugetlb_get_quota(struct address_space *mapping, long delta);
+-void hugetlb_put_quota(struct address_space *mapping, long delta);
+
+ static inline int is_file_hugepages(struct file *file)
+ {
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index 188cb2f..905b1e1 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -652,7 +652,7 @@ typedef struct pglist_data {
+ range, including holes */
+ int node_id;
+ wait_queue_head_t kswapd_wait;
+- struct task_struct *kswapd;
++ struct task_struct *kswapd; /* Protected by lock_memory_hotplug() */
+ int kswapd_max_order;
+ enum zone_type classzone_idx;
+ } pg_data_t;
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index c0cfa0d..7cda65b 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -176,8 +176,6 @@ enum pci_dev_flags {
+ PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2,
+ /* Provide indication device is assigned by a Virtual Machine Manager */
+ PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4,
+- /* Device causes system crash if in D3 during S3 sleep */
+- PCI_DEV_FLAGS_NO_D3_DURING_SLEEP = (__force pci_dev_flags_t) 8,
+ };
+
+ enum pci_irq_reroute_variant {
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 1c4f3e9..5afa2a3 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1892,6 +1892,14 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
+ }
+ #endif
+
++#ifdef CONFIG_NO_HZ
++void calc_load_enter_idle(void);
++void calc_load_exit_idle(void);
++#else
++static inline void calc_load_enter_idle(void) { }
++static inline void calc_load_exit_idle(void) { }
++#endif /* CONFIG_NO_HZ */
++
+ #ifndef CONFIG_CPUMASK_OFFSTACK
+ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+ {
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index bdb4590..53dc7e7 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -213,11 +213,8 @@ enum {
+ /* device driver is going to provide hardware time stamp */
+ SKBTX_IN_PROGRESS = 1 << 2,
+
+- /* ensure the originating sk reference is available on driver level */
+- SKBTX_DRV_NEEDS_SK_REF = 1 << 3,
+-
+ /* device driver supports TX zero-copy buffers */
+- SKBTX_DEV_ZEROCOPY = 1 << 4,
++ SKBTX_DEV_ZEROCOPY = 1 << 3,
+ };
+
+ /*
+diff --git a/include/linux/timex.h b/include/linux/timex.h
+index aa60fe7..08e90fb 100644
+--- a/include/linux/timex.h
++++ b/include/linux/timex.h
+@@ -266,7 +266,7 @@ static inline int ntp_synced(void)
+ /* Returns how long ticks are at present, in ns / 2^NTP_SCALE_SHIFT. */
+ extern u64 tick_length;
+
+-extern void second_overflow(void);
++extern int second_overflow(unsigned long secs);
+ extern void update_ntp_one_tick(void);
+ extern int do_adjtimex(struct timex *);
+ extern void hardpps(const struct timespec *, const struct timespec *);
+diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
+index 6a308d4..1e100c6 100644
+--- a/include/scsi/libsas.h
++++ b/include/scsi/libsas.h
+@@ -159,6 +159,8 @@ enum ata_command_set {
+ ATAPI_COMMAND_SET = 1,
+ };
+
++#define ATA_RESP_FIS_SIZE 24
++
+ struct sata_device {
+ enum ata_command_set command_set;
+ struct smp_resp rps_resp; /* report_phy_sata_resp */
+@@ -170,7 +172,7 @@ struct sata_device {
+
+ struct ata_port *ap;
+ struct ata_host ata_host;
+- struct ata_taskfile tf;
++ u8 fis[ATA_RESP_FIS_SIZE];
+ u32 sstatus;
+ u32 serror;
+ u32 scontrol;
+@@ -486,7 +488,7 @@ enum exec_status {
+ */
+ struct ata_task_resp {
+ u16 frame_len;
+- u8 ending_fis[24]; /* dev to host or data-in */
++ u8 ending_fis[ATA_RESP_FIS_SIZE]; /* dev to host or data-in */
+ u32 sstatus;
+ u32 serror;
+ u32 scontrol;
+diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
+index ae34bf5..6db7a5e 100644
+--- a/kernel/hrtimer.c
++++ b/kernel/hrtimer.c
+@@ -657,6 +657,14 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
+ return 0;
+ }
+
++static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
++{
++ ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
++ ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
++
++ return ktime_get_update_offsets(offs_real, offs_boot);
++}
++
+ /*
+ * Retrigger next event is called after clock was set
+ *
+@@ -665,22 +673,12 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
+ static void retrigger_next_event(void *arg)
+ {
+ struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
+- struct timespec realtime_offset, xtim, wtm, sleep;
+
+ if (!hrtimer_hres_active())
+ return;
+
+- /* Optimized out for !HIGH_RES */
+- get_xtime_and_monotonic_and_sleep_offset(&xtim, &wtm, &sleep);
+- set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);
+-
+- /* Adjust CLOCK_REALTIME offset */
+ raw_spin_lock(&base->lock);
+- base->clock_base[HRTIMER_BASE_REALTIME].offset =
+- timespec_to_ktime(realtime_offset);
+- base->clock_base[HRTIMER_BASE_BOOTTIME].offset =
+- timespec_to_ktime(sleep);
+-
++ hrtimer_update_base(base);
+ hrtimer_force_reprogram(base, 0);
+ raw_spin_unlock(&base->lock);
+ }
+@@ -710,13 +708,25 @@ static int hrtimer_switch_to_hres(void)
+ base->clock_base[i].resolution = KTIME_HIGH_RES;
+
+ tick_setup_sched_timer();
+-
+ /* "Retrigger" the interrupt to get things going */
+ retrigger_next_event(NULL);
+ local_irq_restore(flags);
+ return 1;
+ }
+
++/*
++ * Called from timekeeping code to reprogramm the hrtimer interrupt
++ * device. If called from the timer interrupt context we defer it to
++ * softirq context.
++ */
++void clock_was_set_delayed(void)
++{
++ struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
++
++ cpu_base->clock_was_set = 1;
++ __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
++}
++
+ #else
+
+ static inline int hrtimer_hres_active(void) { return 0; }
+@@ -1250,11 +1260,10 @@ void hrtimer_interrupt(struct clock_event_device *dev)
+ cpu_base->nr_events++;
+ dev->next_event.tv64 = KTIME_MAX;
+
+- entry_time = now = ktime_get();
++ raw_spin_lock(&cpu_base->lock);
++ entry_time = now = hrtimer_update_base(cpu_base);
+ retry:
+ expires_next.tv64 = KTIME_MAX;
+-
+- raw_spin_lock(&cpu_base->lock);
+ /*
+ * We set expires_next to KTIME_MAX here with cpu_base->lock
+ * held to prevent that a timer is enqueued in our queue via
+@@ -1330,8 +1339,12 @@ retry:
+ * We need to prevent that we loop forever in the hrtimer
+ * interrupt routine. We give it 3 attempts to avoid
+ * overreacting on some spurious event.
++ *
++ * Acquire base lock for updating the offsets and retrieving
++ * the current time.
+ */
+- now = ktime_get();
++ raw_spin_lock(&cpu_base->lock);
++ now = hrtimer_update_base(cpu_base);
+ cpu_base->nr_retries++;
+ if (++retries < 3)
+ goto retry;
+@@ -1343,6 +1356,7 @@ retry:
+ */
+ cpu_base->nr_hangs++;
+ cpu_base->hang_detected = 1;
++ raw_spin_unlock(&cpu_base->lock);
+ delta = ktime_sub(now, entry_time);
+ if (delta.tv64 > cpu_base->max_hang_time.tv64)
+ cpu_base->max_hang_time = delta;
+@@ -1395,6 +1409,13 @@ void hrtimer_peek_ahead_timers(void)
+
+ static void run_hrtimer_softirq(struct softirq_action *h)
+ {
++ struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
++
++ if (cpu_base->clock_was_set) {
++ cpu_base->clock_was_set = 0;
++ clock_was_set();
++ }
++
+ hrtimer_peek_ahead_timers();
+ }
+
+diff --git a/kernel/power/swap.c b/kernel/power/swap.c
+index b313086..64f8f97 100644
+--- a/kernel/power/swap.c
++++ b/kernel/power/swap.c
+@@ -6,7 +6,7 @@
+ *
+ * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
+ * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
+- * Copyright (C) 2010 Bojan Smojver <bojan@rexursive.com>
++ * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
+ *
+ * This file is released under the GPLv2.
+ *
+@@ -283,14 +283,17 @@ static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
+ return -ENOSPC;
+
+ if (bio_chain) {
+- src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
++ src = (void *)__get_free_page(__GFP_WAIT | __GFP_NOWARN |
++ __GFP_NORETRY);
+ if (src) {
+ copy_page(src, buf);
+ } else {
+ ret = hib_wait_on_bio_chain(bio_chain); /* Free pages */
+ if (ret)
+ return ret;
+- src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
++ src = (void *)__get_free_page(__GFP_WAIT |
++ __GFP_NOWARN |
++ __GFP_NORETRY);
+ if (src) {
+ copy_page(src, buf);
+ } else {
+@@ -368,12 +371,17 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf,
+ clear_page(handle->cur);
+ handle->cur_swap = offset;
+ handle->k = 0;
+- }
+- if (bio_chain && low_free_pages() <= handle->reqd_free_pages) {
+- error = hib_wait_on_bio_chain(bio_chain);
+- if (error)
+- goto out;
+- handle->reqd_free_pages = reqd_free_pages();
++
++ if (bio_chain && low_free_pages() <= handle->reqd_free_pages) {
++ error = hib_wait_on_bio_chain(bio_chain);
++ if (error)
++ goto out;
++ /*
++ * Recalculate the number of required free pages, to
++ * make sure we never take more than half.
++ */
++ handle->reqd_free_pages = reqd_free_pages();
++ }
+ }
+ out:
+ return error;
+@@ -420,8 +428,9 @@ static int swap_writer_finish(struct swap_map_handle *handle,
+ /* Maximum number of threads for compression/decompression. */
+ #define LZO_THREADS 3
+
+-/* Maximum number of pages for read buffering. */
+-#define LZO_READ_PAGES (MAP_PAGE_ENTRIES * 8)
++/* Minimum/maximum number of pages for read buffering. */
++#define LZO_MIN_RD_PAGES 1024
++#define LZO_MAX_RD_PAGES 8192
+
+
+ /**
+@@ -632,12 +641,6 @@ static int save_image_lzo(struct swap_map_handle *handle,
+ }
+
+ /*
+- * Adjust number of free pages after all allocations have been done.
+- * We don't want to run out of pages when writing.
+- */
+- handle->reqd_free_pages = reqd_free_pages();
+-
+- /*
+ * Start the CRC32 thread.
+ */
+ init_waitqueue_head(&crc->go);
+@@ -658,6 +661,12 @@ static int save_image_lzo(struct swap_map_handle *handle,
+ goto out_clean;
+ }
+
++ /*
++ * Adjust the number of required free pages after all allocations have
++ * been done. We don't want to run out of pages when writing.
++ */
++ handle->reqd_free_pages = reqd_free_pages();
++
+ printk(KERN_INFO
+ "PM: Using %u thread(s) for compression.\n"
+ "PM: Compressing and saving image data (%u pages) ... ",
+@@ -1067,7 +1076,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
+ unsigned i, thr, run_threads, nr_threads;
+ unsigned ring = 0, pg = 0, ring_size = 0,
+ have = 0, want, need, asked = 0;
+- unsigned long read_pages;
++ unsigned long read_pages = 0;
+ unsigned char **page = NULL;
+ struct dec_data *data = NULL;
+ struct crc_data *crc = NULL;
+@@ -1079,7 +1088,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
+ nr_threads = num_online_cpus() - 1;
+ nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
+
+- page = vmalloc(sizeof(*page) * LZO_READ_PAGES);
++ page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES);
+ if (!page) {
+ printk(KERN_ERR "PM: Failed to allocate LZO page\n");
+ ret = -ENOMEM;
+@@ -1144,15 +1153,22 @@ static int load_image_lzo(struct swap_map_handle *handle,
+ }
+
+ /*
+- * Adjust number of pages for read buffering, in case we are short.
++ * Set the number of pages for read buffering.
++ * This is complete guesswork, because we'll only know the real
++ * picture once prepare_image() is called, which is much later on
++ * during the image load phase. We'll assume the worst case and
++ * say that none of the image pages are from high memory.
+ */
+- read_pages = (nr_free_pages() - snapshot_get_image_size()) >> 1;
+- read_pages = clamp_val(read_pages, LZO_CMP_PAGES, LZO_READ_PAGES);
++ if (low_free_pages() > snapshot_get_image_size())
++ read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
++ read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);
+
+ for (i = 0; i < read_pages; i++) {
+ page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
+ __GFP_WAIT | __GFP_HIGH :
+- __GFP_WAIT);
++ __GFP_WAIT | __GFP_NOWARN |
++ __GFP_NORETRY);
++
+ if (!page[i]) {
+ if (i < LZO_CMP_PAGES) {
+ ring_size = i;
+diff --git a/kernel/sched.c b/kernel/sched.c
+index 576a27f..52ac69b 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -1885,7 +1885,6 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
+
+ #endif
+
+-static void calc_load_account_idle(struct rq *this_rq);
+ static void update_sysctl(void);
+ static int get_update_sysctl_factor(void);
+ static void update_cpu_load(struct rq *this_rq);
+@@ -3401,11 +3400,73 @@ unsigned long this_cpu_load(void)
+ }
+
+
++/*
++ * Global load-average calculations
++ *
++ * We take a distributed and async approach to calculating the global load-avg
++ * in order to minimize overhead.
++ *
++ * The global load average is an exponentially decaying average of nr_running +
++ * nr_uninterruptible.
++ *
++ * Once every LOAD_FREQ:
++ *
++ * nr_active = 0;
++ * for_each_possible_cpu(cpu)
++ * nr_active += cpu_of(cpu)->nr_running + cpu_of(cpu)->nr_uninterruptible;
++ *
++ * avenrun[n] = avenrun[0] * exp_n + nr_active * (1 - exp_n)
++ *
++ * Due to a number of reasons the above turns in the mess below:
++ *
++ * - for_each_possible_cpu() is prohibitively expensive on machines with
++ * serious number of cpus, therefore we need to take a distributed approach
++ * to calculating nr_active.
++ *
++ * \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
++ * = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
++ *
++ * So assuming nr_active := 0 when we start out -- true per definition, we
++ * can simply take per-cpu deltas and fold those into a global accumulate
++ * to obtain the same result. See calc_load_fold_active().
++ *
++ * Furthermore, in order to avoid synchronizing all per-cpu delta folding
++ * across the machine, we assume 10 ticks is sufficient time for every
++ * cpu to have completed this task.
++ *
++ * This places an upper-bound on the IRQ-off latency of the machine. Then
++ * again, being late doesn't loose the delta, just wrecks the sample.
++ *
++ * - cpu_rq()->nr_uninterruptible isn't accurately tracked per-cpu because
++ * this would add another cross-cpu cacheline miss and atomic operation
++ * to the wakeup path. Instead we increment on whatever cpu the task ran
++ * when it went into uninterruptible state and decrement on whatever cpu
++ * did the wakeup. This means that only the sum of nr_uninterruptible over
++ * all cpus yields the correct result.
++ *
++ * This covers the NO_HZ=n code, for extra head-aches, see the comment below.
++ */
++
+ /* Variables and functions for calc_load */
+ static atomic_long_t calc_load_tasks;
+ static unsigned long calc_load_update;
+ unsigned long avenrun[3];
+-EXPORT_SYMBOL(avenrun);
++EXPORT_SYMBOL(avenrun); /* should be removed */
++
++/**
++ * get_avenrun - get the load average array
++ * @loads: pointer to dest load array
++ * @offset: offset to add
++ * @shift: shift count to shift the result left
++ *
++ * These values are estimates at best, so no need for locking.
++ */
++void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
++{
++ loads[0] = (avenrun[0] + offset) << shift;
++ loads[1] = (avenrun[1] + offset) << shift;
++ loads[2] = (avenrun[2] + offset) << shift;
++}
+
+ static long calc_load_fold_active(struct rq *this_rq)
+ {
+@@ -3422,6 +3483,9 @@ static long calc_load_fold_active(struct rq *this_rq)
+ return delta;
+ }
+
++/*
++ * a1 = a0 * e + a * (1 - e)
++ */
+ static unsigned long
+ calc_load(unsigned long load, unsigned long exp, unsigned long active)
+ {
+@@ -3433,30 +3497,118 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active)
+
+ #ifdef CONFIG_NO_HZ
+ /*
+- * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
++ * Handle NO_HZ for the global load-average.
++ *
++ * Since the above described distributed algorithm to compute the global
++ * load-average relies on per-cpu sampling from the tick, it is affected by
++ * NO_HZ.
++ *
++ * The basic idea is to fold the nr_active delta into a global idle-delta upon
++ * entering NO_HZ state such that we can include this as an 'extra' cpu delta
++ * when we read the global state.
++ *
++ * Obviously reality has to ruin such a delightfully simple scheme:
++ *
++ * - When we go NO_HZ idle during the window, we can negate our sample
++ * contribution, causing under-accounting.
++ *
++ * We avoid this by keeping two idle-delta counters and flipping them
++ * when the window starts, thus separating old and new NO_HZ load.
++ *
++ * The only trick is the slight shift in index flip for read vs write.
++ *
++ * 0s 5s 10s 15s
++ * +10 +10 +10 +10
++ * |-|-----------|-|-----------|-|-----------|-|
++ * r:0 0 1 1 0 0 1 1 0
++ * w:0 1 1 0 0 1 1 0 0
++ *
++ * This ensures we'll fold the old idle contribution in this window while
++ * accumlating the new one.
++ *
++ * - When we wake up from NO_HZ idle during the window, we push up our
++ * contribution, since we effectively move our sample point to a known
++ * busy state.
++ *
++ * This is solved by pushing the window forward, and thus skipping the
++ * sample, for this cpu (effectively using the idle-delta for this cpu which
++ * was in effect at the time the window opened). This also solves the issue
++ * of having to deal with a cpu having been in NOHZ idle for multiple
++ * LOAD_FREQ intervals.
+ *
+ * When making the ILB scale, we should try to pull this in as well.
+ */
+-static atomic_long_t calc_load_tasks_idle;
++static atomic_long_t calc_load_idle[2];
++static int calc_load_idx;
+
+-static void calc_load_account_idle(struct rq *this_rq)
++static inline int calc_load_write_idx(void)
+ {
++ int idx = calc_load_idx;
++
++ /*
++ * See calc_global_nohz(), if we observe the new index, we also
++ * need to observe the new update time.
++ */
++ smp_rmb();
++
++ /*
++ * If the folding window started, make sure we start writing in the
++ * next idle-delta.
++ */
++ if (!time_before(jiffies, calc_load_update))
++ idx++;
++
++ return idx & 1;
++}
++
++static inline int calc_load_read_idx(void)
++{
++ return calc_load_idx & 1;
++}
++
++void calc_load_enter_idle(void)
++{
++ struct rq *this_rq = this_rq();
+ long delta;
+
++ /*
++ * We're going into NOHZ mode, if there's any pending delta, fold it
++ * into the pending idle delta.
++ */
+ delta = calc_load_fold_active(this_rq);
+- if (delta)
+- atomic_long_add(delta, &calc_load_tasks_idle);
++ if (delta) {
++ int idx = calc_load_write_idx();
++ atomic_long_add(delta, &calc_load_idle[idx]);
++ }
+ }
+
+-static long calc_load_fold_idle(void)
++void calc_load_exit_idle(void)
+ {
+- long delta = 0;
++ struct rq *this_rq = this_rq();
++
++ /*
++ * If we're still before the sample window, we're done.
++ */
++ if (time_before(jiffies, this_rq->calc_load_update))
++ return;
+
+ /*
+- * Its got a race, we don't care...
++ * We woke inside or after the sample window, this means we're already
++ * accounted through the nohz accounting, so skip the entire deal and
++ * sync up for the next window.
+ */
+- if (atomic_long_read(&calc_load_tasks_idle))
+- delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
++ this_rq->calc_load_update = calc_load_update;
++ if (time_before(jiffies, this_rq->calc_load_update + 10))
++ this_rq->calc_load_update += LOAD_FREQ;
++}
++
++static long calc_load_fold_idle(void)
++{
++ int idx = calc_load_read_idx();
++ long delta = 0;
++
++ if (atomic_long_read(&calc_load_idle[idx]))
++ delta = atomic_long_xchg(&calc_load_idle[idx], 0);
+
+ return delta;
+ }
+@@ -3542,66 +3694,39 @@ static void calc_global_nohz(void)
+ {
+ long delta, active, n;
+
+- /*
+- * If we crossed a calc_load_update boundary, make sure to fold
+- * any pending idle changes, the respective CPUs might have
+- * missed the tick driven calc_load_account_active() update
+- * due to NO_HZ.
+- */
+- delta = calc_load_fold_idle();
+- if (delta)
+- atomic_long_add(delta, &calc_load_tasks);
+-
+- /*
+- * It could be the one fold was all it took, we done!
+- */
+- if (time_before(jiffies, calc_load_update + 10))
+- return;
+-
+- /*
+- * Catch-up, fold however many we are behind still
+- */
+- delta = jiffies - calc_load_update - 10;
+- n = 1 + (delta / LOAD_FREQ);
++ if (!time_before(jiffies, calc_load_update + 10)) {
++ /*
++ * Catch-up, fold however many we are behind still
++ */
++ delta = jiffies - calc_load_update - 10;
++ n = 1 + (delta / LOAD_FREQ);
+
+- active = atomic_long_read(&calc_load_tasks);
+- active = active > 0 ? active * FIXED_1 : 0;
++ active = atomic_long_read(&calc_load_tasks);
++ active = active > 0 ? active * FIXED_1 : 0;
+
+- avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
+- avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
+- avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
++ avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
++ avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
++ avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
+
+- calc_load_update += n * LOAD_FREQ;
+-}
+-#else
+-static void calc_load_account_idle(struct rq *this_rq)
+-{
+-}
++ calc_load_update += n * LOAD_FREQ;
++ }
+
+-static inline long calc_load_fold_idle(void)
+-{
+- return 0;
++ /*
++ * Flip the idle index...
++ *
++ * Make sure we first write the new time then flip the index, so that
++ * calc_load_write_idx() will see the new time when it reads the new
++ * index, this avoids a double flip messing things up.
++ */
++ smp_wmb();
++ calc_load_idx++;
+ }
++#else /* !CONFIG_NO_HZ */
+
+-static void calc_global_nohz(void)
+-{
+-}
+-#endif
++static inline long calc_load_fold_idle(void) { return 0; }
++static inline void calc_global_nohz(void) { }
+
+-/**
+- * get_avenrun - get the load average array
+- * @loads: pointer to dest load array
+- * @offset: offset to add
+- * @shift: shift count to shift the result left
+- *
+- * These values are estimates at best, so no need for locking.
+- */
+-void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
+-{
+- loads[0] = (avenrun[0] + offset) << shift;
+- loads[1] = (avenrun[1] + offset) << shift;
+- loads[2] = (avenrun[2] + offset) << shift;
+-}
++#endif /* CONFIG_NO_HZ */
+
+ /*
+ * calc_load - update the avenrun load estimates 10 ticks after the
+@@ -3609,11 +3734,18 @@ void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
+ */
+ void calc_global_load(unsigned long ticks)
+ {
+- long active;
++ long active, delta;
+
+ if (time_before(jiffies, calc_load_update + 10))
+ return;
+
++ /*
++ * Fold the 'old' idle-delta to include all NO_HZ cpus.
++ */
++ delta = calc_load_fold_idle();
++ if (delta)
++ atomic_long_add(delta, &calc_load_tasks);
++
+ active = atomic_long_read(&calc_load_tasks);
+ active = active > 0 ? active * FIXED_1 : 0;
+
+@@ -3624,12 +3756,7 @@ void calc_global_load(unsigned long ticks)
+ calc_load_update += LOAD_FREQ;
+
+ /*
+- * Account one period with whatever state we found before
+- * folding in the nohz state and ageing the entire idle period.
+- *
+- * This avoids loosing a sample when we go idle between
+- * calc_load_account_active() (10 ticks ago) and now and thus
+- * under-accounting.
++ * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
+ */
+ calc_global_nohz();
+ }
+@@ -3646,7 +3773,6 @@ static void calc_load_account_active(struct rq *this_rq)
+ return;
+
+ delta = calc_load_fold_active(this_rq);
+- delta += calc_load_fold_idle();
+ if (delta)
+ atomic_long_add(delta, &calc_load_tasks);
+
+@@ -3654,6 +3780,10 @@ static void calc_load_account_active(struct rq *this_rq)
+ }
+
+ /*
++ * End of global load-average stuff
++ */
++
++/*
+ * The exact cpuload at various idx values, calculated at every tick would be
+ * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
+ *
+diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
+index 0a51882..be92bfe 100644
+--- a/kernel/sched_idletask.c
++++ b/kernel/sched_idletask.c
+@@ -23,7 +23,6 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
+ static struct task_struct *pick_next_task_idle(struct rq *rq)
+ {
+ schedstat_inc(rq, sched_goidle);
+- calc_load_account_idle(rq);
+ return rq->idle;
+ }
+
+diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
+index 4b85a7a..f1eb182 100644
+--- a/kernel/time/ntp.c
++++ b/kernel/time/ntp.c
+@@ -31,8 +31,6 @@ unsigned long tick_nsec;
+ u64 tick_length;
+ static u64 tick_length_base;
+
+-static struct hrtimer leap_timer;
+-
+ #define MAX_TICKADJ 500LL /* usecs */
+ #define MAX_TICKADJ_SCALED \
+ (((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
+@@ -350,60 +348,60 @@ void ntp_clear(void)
+ }
+
+ /*
+- * Leap second processing. If in leap-insert state at the end of the
+- * day, the system clock is set back one second; if in leap-delete
+- * state, the system clock is set ahead one second.
++ * this routine handles the overflow of the microsecond field
++ *
++ * The tricky bits of code to handle the accurate clock support
++ * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
++ * They were originally developed for SUN and DEC kernels.
++ * All the kudos should go to Dave for this stuff.
++ *
++ * Also handles leap second processing, and returns leap offset
+ */
+-static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
++int second_overflow(unsigned long secs)
+ {
+- enum hrtimer_restart res = HRTIMER_NORESTART;
+-
+- write_seqlock(&xtime_lock);
++ int leap = 0;
++ s64 delta;
+
++ /*
++ * Leap second processing. If in leap-insert state at the end of the
++ * day, the system clock is set back one second; if in leap-delete
++ * state, the system clock is set ahead one second.
++ */
+ switch (time_state) {
+ case TIME_OK:
++ if (time_status & STA_INS)
++ time_state = TIME_INS;
++ else if (time_status & STA_DEL)
++ time_state = TIME_DEL;
+ break;
+ case TIME_INS:
+- timekeeping_leap_insert(-1);
+- time_state = TIME_OOP;
+- printk(KERN_NOTICE
+- "Clock: inserting leap second 23:59:60 UTC\n");
+- hrtimer_add_expires_ns(&leap_timer, NSEC_PER_SEC);
+- res = HRTIMER_RESTART;
++ if (secs % 86400 == 0) {
++ leap = -1;
++ time_state = TIME_OOP;
++ time_tai++;
++ printk(KERN_NOTICE
++ "Clock: inserting leap second 23:59:60 UTC\n");
++ }
+ break;
+ case TIME_DEL:
+- timekeeping_leap_insert(1);
+- time_tai--;
+- time_state = TIME_WAIT;
+- printk(KERN_NOTICE
+- "Clock: deleting leap second 23:59:59 UTC\n");
++ if ((secs + 1) % 86400 == 0) {
++ leap = 1;
++ time_tai--;
++ time_state = TIME_WAIT;
++ printk(KERN_NOTICE
++ "Clock: deleting leap second 23:59:59 UTC\n");
++ }
+ break;
+ case TIME_OOP:
+- time_tai++;
+ time_state = TIME_WAIT;
+- /* fall through */
++ break;
++
+ case TIME_WAIT:
+ if (!(time_status & (STA_INS | STA_DEL)))
+ time_state = TIME_OK;
+ break;
+ }
+
+- write_sequnlock(&xtime_lock);
+-
+- return res;
+-}
+-
+-/*
+- * this routine handles the overflow of the microsecond field
+- *
+- * The tricky bits of code to handle the accurate clock support
+- * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
+- * They were originally developed for SUN and DEC kernels.
+- * All the kudos should go to Dave for this stuff.
+- */
+-void second_overflow(void)
+-{
+- s64 delta;
+
+ /* Bump the maxerror field */
+ time_maxerror += MAXFREQ / NSEC_PER_USEC;
+@@ -423,23 +421,25 @@ void second_overflow(void)
+ pps_dec_valid();
+
+ if (!time_adjust)
+- return;
++ goto out;
+
+ if (time_adjust > MAX_TICKADJ) {
+ time_adjust -= MAX_TICKADJ;
+ tick_length += MAX_TICKADJ_SCALED;
+- return;
++ goto out;
+ }
+
+ if (time_adjust < -MAX_TICKADJ) {
+ time_adjust += MAX_TICKADJ;
+ tick_length -= MAX_TICKADJ_SCALED;
+- return;
++ goto out;
+ }
+
+ tick_length += (s64)(time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ)
+ << NTP_SCALE_SHIFT;
+ time_adjust = 0;
++out:
++ return leap;
+ }
+
+ #ifdef CONFIG_GENERIC_CMOS_UPDATE
+@@ -501,27 +501,6 @@ static void notify_cmos_timer(void)
+ static inline void notify_cmos_timer(void) { }
+ #endif
+
+-/*
+- * Start the leap seconds timer:
+- */
+-static inline void ntp_start_leap_timer(struct timespec *ts)
+-{
+- long now = ts->tv_sec;
+-
+- if (time_status & STA_INS) {
+- time_state = TIME_INS;
+- now += 86400 - now % 86400;
+- hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
+-
+- return;
+- }
+-
+- if (time_status & STA_DEL) {
+- time_state = TIME_DEL;
+- now += 86400 - (now + 1) % 86400;
+- hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
+- }
+-}
+
+ /*
+ * Propagate a new txc->status value into the NTP state:
+@@ -546,22 +525,6 @@ static inline void process_adj_status(struct timex *txc, struct timespec *ts)
+ time_status &= STA_RONLY;
+ time_status |= txc->status & ~STA_RONLY;
+
+- switch (time_state) {
+- case TIME_OK:
+- ntp_start_leap_timer(ts);
+- break;
+- case TIME_INS:
+- case TIME_DEL:
+- time_state = TIME_OK;
+- ntp_start_leap_timer(ts);
+- case TIME_WAIT:
+- if (!(time_status & (STA_INS | STA_DEL)))
+- time_state = TIME_OK;
+- break;
+- case TIME_OOP:
+- hrtimer_restart(&leap_timer);
+- break;
+- }
+ }
+ /*
+ * Called with the xtime lock held, so we can access and modify
+@@ -643,9 +606,6 @@ int do_adjtimex(struct timex *txc)
+ (txc->tick < 900000/USER_HZ ||
+ txc->tick > 1100000/USER_HZ))
+ return -EINVAL;
+-
+- if (txc->modes & ADJ_STATUS && time_state != TIME_OK)
+- hrtimer_cancel(&leap_timer);
+ }
+
+ if (txc->modes & ADJ_SETOFFSET) {
+@@ -967,6 +927,4 @@ __setup("ntp_tick_adj=", ntp_tick_adj_setup);
+ void __init ntp_init(void)
+ {
+ ntp_clear();
+- hrtimer_init(&leap_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+- leap_timer.function = ntp_leap_second;
+ }
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index c923640..9955ebd 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -430,6 +430,7 @@ void tick_nohz_stop_sched_tick(int inidle)
+ */
+ if (!ts->tick_stopped) {
+ select_nohz_load_balancer(1);
++ calc_load_enter_idle();
+
+ ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
+ ts->tick_stopped = 1;
+@@ -563,6 +564,7 @@ void tick_nohz_restart_sched_tick(void)
+ account_idle_ticks(ticks);
+ #endif
+
++ calc_load_exit_idle();
+ touch_softlockup_watchdog();
+ /*
+ * Cancel the scheduled timer and restore the tick
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index 2378413..03e67d4 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -161,23 +161,43 @@ static struct timespec xtime __attribute__ ((aligned (16)));
+ static struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
+ static struct timespec total_sleep_time;
+
++/* Offset clock monotonic -> clock realtime */
++static ktime_t offs_real;
++
++/* Offset clock monotonic -> clock boottime */
++static ktime_t offs_boot;
++
+ /*
+ * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock.
+ */
+ static struct timespec raw_time;
+
+-/* flag for if timekeeping is suspended */
+-int __read_mostly timekeeping_suspended;
++/* must hold write on xtime_lock */
++static void update_rt_offset(void)
++{
++ struct timespec tmp, *wtm = &wall_to_monotonic;
+
+-/* must hold xtime_lock */
+-void timekeeping_leap_insert(int leapsecond)
++ set_normalized_timespec(&tmp, -wtm->tv_sec, -wtm->tv_nsec);
++ offs_real = timespec_to_ktime(tmp);
++}
++
++/* must hold write on xtime_lock */
++static void timekeeping_update(bool clearntp)
+ {
+- xtime.tv_sec += leapsecond;
+- wall_to_monotonic.tv_sec -= leapsecond;
+- update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
+- timekeeper.mult);
++ if (clearntp) {
++ timekeeper.ntp_error = 0;
++ ntp_clear();
++ }
++ update_rt_offset();
++ update_vsyscall(&xtime, &wall_to_monotonic,
++ timekeeper.clock, timekeeper.mult);
+ }
+
++
++
++/* flag for if timekeeping is suspended */
++int __read_mostly timekeeping_suspended;
++
+ /**
+ * timekeeping_forward_now - update clock to the current time
+ *
+@@ -375,11 +395,7 @@ int do_settimeofday(const struct timespec *tv)
+
+ xtime = *tv;
+
+- timekeeper.ntp_error = 0;
+- ntp_clear();
+-
+- update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
+- timekeeper.mult);
++ timekeeping_update(true);
+
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+
+@@ -412,11 +428,7 @@ int timekeeping_inject_offset(struct timespec *ts)
+ xtime = timespec_add(xtime, *ts);
+ wall_to_monotonic = timespec_sub(wall_to_monotonic, *ts);
+
+- timekeeper.ntp_error = 0;
+- ntp_clear();
+-
+- update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
+- timekeeper.mult);
++ timekeeping_update(true);
+
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+
+@@ -591,6 +603,7 @@ void __init timekeeping_init(void)
+ }
+ set_normalized_timespec(&wall_to_monotonic,
+ -boot.tv_sec, -boot.tv_nsec);
++ update_rt_offset();
+ total_sleep_time.tv_sec = 0;
+ total_sleep_time.tv_nsec = 0;
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+@@ -599,6 +612,12 @@ void __init timekeeping_init(void)
+ /* time in seconds when suspend began */
+ static struct timespec timekeeping_suspend_time;
+
++static void update_sleep_time(struct timespec t)
++{
++ total_sleep_time = t;
++ offs_boot = timespec_to_ktime(t);
++}
++
+ /**
+ * __timekeeping_inject_sleeptime - Internal function to add sleep interval
+ * @delta: pointer to a timespec delta value
+@@ -616,7 +635,7 @@ static void __timekeeping_inject_sleeptime(struct timespec *delta)
+
+ xtime = timespec_add(xtime, *delta);
+ wall_to_monotonic = timespec_sub(wall_to_monotonic, *delta);
+- total_sleep_time = timespec_add(total_sleep_time, *delta);
++ update_sleep_time(timespec_add(total_sleep_time, *delta));
+ }
+
+
+@@ -645,10 +664,7 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
+
+ __timekeeping_inject_sleeptime(delta);
+
+- timekeeper.ntp_error = 0;
+- ntp_clear();
+- update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
+- timekeeper.mult);
++ timekeeping_update(true);
+
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+
+@@ -683,6 +699,7 @@ static void timekeeping_resume(void)
+ timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
+ timekeeper.ntp_error = 0;
+ timekeeping_suspended = 0;
++ timekeeping_update(false);
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+
+ touch_softlockup_watchdog();
+@@ -942,9 +959,14 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
+
+ timekeeper.xtime_nsec += timekeeper.xtime_interval << shift;
+ while (timekeeper.xtime_nsec >= nsecps) {
++ int leap;
+ timekeeper.xtime_nsec -= nsecps;
+ xtime.tv_sec++;
+- second_overflow();
++ leap = second_overflow(xtime.tv_sec);
++ xtime.tv_sec += leap;
++ wall_to_monotonic.tv_sec -= leap;
++ if (leap)
++ clock_was_set_delayed();
+ }
+
+ /* Accumulate raw time */
+@@ -1050,14 +1072,17 @@ static void update_wall_time(void)
+ * xtime.tv_nsec isn't larger then NSEC_PER_SEC
+ */
+ if (unlikely(xtime.tv_nsec >= NSEC_PER_SEC)) {
++ int leap;
+ xtime.tv_nsec -= NSEC_PER_SEC;
+ xtime.tv_sec++;
+- second_overflow();
++ leap = second_overflow(xtime.tv_sec);
++ xtime.tv_sec += leap;
++ wall_to_monotonic.tv_sec -= leap;
++ if (leap)
++ clock_was_set_delayed();
+ }
+
+- /* check to see if there is a new clocksource to use */
+- update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
+- timekeeper.mult);
++ timekeeping_update(false);
+ }
+
+ /**
+@@ -1216,6 +1241,40 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
+ } while (read_seqretry(&xtime_lock, seq));
+ }
+
++#ifdef CONFIG_HIGH_RES_TIMERS
++/**
++ * ktime_get_update_offsets - hrtimer helper
++ * @real: pointer to storage for monotonic -> realtime offset
++ * @_boot: pointer to storage for monotonic -> boottime offset
++ *
++ * Returns current monotonic time and updates the offsets
++ * Called from hrtimer_interupt() or retrigger_next_event()
++ */
++ktime_t ktime_get_update_offsets(ktime_t *real, ktime_t *boot)
++{
++ ktime_t now;
++ unsigned int seq;
++ u64 secs, nsecs;
++
++ do {
++ seq = read_seqbegin(&xtime_lock);
++
++ secs = xtime.tv_sec;
++ nsecs = xtime.tv_nsec;
++ nsecs += timekeeping_get_ns();
++ /* If arch requires, add in gettimeoffset() */
++ nsecs += arch_gettimeoffset();
++
++ *real = offs_real;
++ *boot = offs_boot;
++ } while (read_seqretry(&xtime_lock, seq));
++
++ now = ktime_add_ns(ktime_set(secs, 0), nsecs);
++ now = ktime_sub(now, *real);
++ return now;
++}
++#endif
++
+ /**
+ * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
+ */
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 8fb8a40..50f1c60 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -592,8 +592,11 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
+ if (err) {
+ putback_lru_pages(&cc->migratepages);
+ cc->nr_migratepages = 0;
++ if (err == -ENOMEM) {
++ ret = COMPACT_PARTIAL;
++ goto out;
++ }
+ }
+-
+ }
+
+ out:
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 5f5c545..7c535b0 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -53,6 +53,84 @@ static unsigned long __initdata default_hstate_size;
+ */
+ static DEFINE_SPINLOCK(hugetlb_lock);
+
++static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
++{
++ bool free = (spool->count == 0) && (spool->used_hpages == 0);
++
++ spin_unlock(&spool->lock);
++
++ /* If no pages are used, and no other handles to the subpool
++ * remain, free the subpool the subpool remain */
++ if (free)
++ kfree(spool);
++}
++
++struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
++{
++ struct hugepage_subpool *spool;
++
++ spool = kmalloc(sizeof(*spool), GFP_KERNEL);
++ if (!spool)
++ return NULL;
++
++ spin_lock_init(&spool->lock);
++ spool->count = 1;
++ spool->max_hpages = nr_blocks;
++ spool->used_hpages = 0;
++
++ return spool;
++}
++
++void hugepage_put_subpool(struct hugepage_subpool *spool)
++{
++ spin_lock(&spool->lock);
++ BUG_ON(!spool->count);
++ spool->count--;
++ unlock_or_release_subpool(spool);
++}
++
++static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
++ long delta)
++{
++ int ret = 0;
++
++ if (!spool)
++ return 0;
++
++ spin_lock(&spool->lock);
++ if ((spool->used_hpages + delta) <= spool->max_hpages) {
++ spool->used_hpages += delta;
++ } else {
++ ret = -ENOMEM;
++ }
++ spin_unlock(&spool->lock);
++
++ return ret;
++}
++
++static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
++ long delta)
++{
++ if (!spool)
++ return;
++
++ spin_lock(&spool->lock);
++ spool->used_hpages -= delta;
++ /* If hugetlbfs_put_super couldn't free spool due to
++ * an outstanding quota reference, free it now. */
++ unlock_or_release_subpool(spool);
++}
++
++static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
++{
++ return HUGETLBFS_SB(inode->i_sb)->spool;
++}
++
++static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
++{
++ return subpool_inode(vma->vm_file->f_dentry->d_inode);
++}
++
+ /*
+ * Region tracking -- allows tracking of reservations and instantiated pages
+ * across the pages in a mapping.
+@@ -533,9 +611,9 @@ static void free_huge_page(struct page *page)
+ */
+ struct hstate *h = page_hstate(page);
+ int nid = page_to_nid(page);
+- struct address_space *mapping;
++ struct hugepage_subpool *spool =
++ (struct hugepage_subpool *)page_private(page);
+
+- mapping = (struct address_space *) page_private(page);
+ set_page_private(page, 0);
+ page->mapping = NULL;
+ BUG_ON(page_count(page));
+@@ -551,8 +629,7 @@ static void free_huge_page(struct page *page)
+ enqueue_huge_page(h, page);
+ }
+ spin_unlock(&hugetlb_lock);
+- if (mapping)
+- hugetlb_put_quota(mapping, 1);
++ hugepage_subpool_put_pages(spool, 1);
+ }
+
+ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
+@@ -966,11 +1043,12 @@ static void return_unused_surplus_pages(struct hstate *h,
+ /*
+ * Determine if the huge page at addr within the vma has an associated
+ * reservation. Where it does not we will need to logically increase
+- * reservation and actually increase quota before an allocation can occur.
+- * Where any new reservation would be required the reservation change is
+- * prepared, but not committed. Once the page has been quota'd allocated
+- * an instantiated the change should be committed via vma_commit_reservation.
+- * No action is required on failure.
++ * reservation and actually increase subpool usage before an allocation
++ * can occur. Where any new reservation would be required the
++ * reservation change is prepared, but not committed. Once the page
++ * has been allocated from the subpool and instantiated the change should
++ * be committed via vma_commit_reservation. No action is required on
++ * failure.
+ */
+ static long vma_needs_reservation(struct hstate *h,
+ struct vm_area_struct *vma, unsigned long addr)
+@@ -1019,24 +1097,24 @@ static void vma_commit_reservation(struct hstate *h,
+ static struct page *alloc_huge_page(struct vm_area_struct *vma,
+ unsigned long addr, int avoid_reserve)
+ {
++ struct hugepage_subpool *spool = subpool_vma(vma);
+ struct hstate *h = hstate_vma(vma);
+ struct page *page;
+- struct address_space *mapping = vma->vm_file->f_mapping;
+- struct inode *inode = mapping->host;
+ long chg;
+
+ /*
+- * Processes that did not create the mapping will have no reserves and
+- * will not have accounted against quota. Check that the quota can be
+- * made before satisfying the allocation
+- * MAP_NORESERVE mappings may also need pages and quota allocated
+- * if no reserve mapping overlaps.
++ * Processes that did not create the mapping will have no
++ * reserves and will not have accounted against subpool
++ * limit. Check that the subpool limit can be made before
++ * satisfying the allocation MAP_NORESERVE mappings may also
++ * need pages and subpool limit allocated allocated if no reserve
++ * mapping overlaps.
+ */
+ chg = vma_needs_reservation(h, vma, addr);
+ if (chg < 0)
+ return ERR_PTR(-VM_FAULT_OOM);
+ if (chg)
+- if (hugetlb_get_quota(inode->i_mapping, chg))
++ if (hugepage_subpool_get_pages(spool, chg))
+ return ERR_PTR(-VM_FAULT_SIGBUS);
+
+ spin_lock(&hugetlb_lock);
+@@ -1046,12 +1124,12 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
+ if (!page) {
+ page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
+ if (!page) {
+- hugetlb_put_quota(inode->i_mapping, chg);
++ hugepage_subpool_put_pages(spool, chg);
+ return ERR_PTR(-VM_FAULT_SIGBUS);
+ }
+ }
+
+- set_page_private(page, (unsigned long) mapping);
++ set_page_private(page, (unsigned long)spool);
+
+ vma_commit_reservation(h, vma, addr);
+
+@@ -2081,6 +2159,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
+ {
+ struct hstate *h = hstate_vma(vma);
+ struct resv_map *reservations = vma_resv_map(vma);
++ struct hugepage_subpool *spool = subpool_vma(vma);
+ unsigned long reserve;
+ unsigned long start;
+ unsigned long end;
+@@ -2096,7 +2175,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
+
+ if (reserve) {
+ hugetlb_acct_memory(h, -reserve);
+- hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
++ hugepage_subpool_put_pages(spool, reserve);
+ }
+ }
+ }
+@@ -2326,7 +2405,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
+ address = address & huge_page_mask(h);
+ pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
+ + (vma->vm_pgoff >> PAGE_SHIFT);
+- mapping = (struct address_space *)page_private(page);
++ mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
+
+ /*
+ * Take the mapping lock for the duration of the table walk. As
+@@ -2865,11 +2944,12 @@ int hugetlb_reserve_pages(struct inode *inode,
+ {
+ long ret, chg;
+ struct hstate *h = hstate_inode(inode);
++ struct hugepage_subpool *spool = subpool_inode(inode);
+
+ /*
+ * Only apply hugepage reservation if asked. At fault time, an
+ * attempt will be made for VM_NORESERVE to allocate a page
+- * and filesystem quota without using reserves
++ * without using reserves
+ */
+ if (vm_flags & VM_NORESERVE)
+ return 0;
+@@ -2898,19 +2978,19 @@ int hugetlb_reserve_pages(struct inode *inode,
+ goto out_err;
+ }
+
+- /* There must be enough filesystem quota for the mapping */
+- if (hugetlb_get_quota(inode->i_mapping, chg)) {
++ /* There must be enough pages in the subpool for the mapping */
++ if (hugepage_subpool_get_pages(spool, chg)) {
+ ret = -ENOSPC;
+ goto out_err;
+ }
+
+ /*
+ * Check enough hugepages are available for the reservation.
+- * Hand back the quota if there are not
++ * Hand the pages back to the subpool if there are not
+ */
+ ret = hugetlb_acct_memory(h, chg);
+ if (ret < 0) {
+- hugetlb_put_quota(inode->i_mapping, chg);
++ hugepage_subpool_put_pages(spool, chg);
+ goto out_err;
+ }
+
+@@ -2938,12 +3018,13 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
+ {
+ struct hstate *h = hstate_inode(inode);
+ long chg = region_truncate(&inode->i_mapping->private_list, offset);
++ struct hugepage_subpool *spool = subpool_inode(inode);
+
+ spin_lock(&inode->i_lock);
+ inode->i_blocks -= (blocks_per_huge_page(h) * freed);
+ spin_unlock(&inode->i_lock);
+
+- hugetlb_put_quota(inode->i_mapping, (chg - freed));
++ hugepage_subpool_put_pages(spool, (chg - freed));
+ hugetlb_acct_memory(h, -(chg - freed));
+ }
+
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index fbe2d2c..8342119 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2824,7 +2824,10 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
+ * them before going back to sleep.
+ */
+ set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
+- schedule();
++
++ if (!kthread_should_stop())
++ schedule();
++
+ set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
+ } else {
+ if (remaining)
+@@ -3090,14 +3093,17 @@ int kswapd_run(int nid)
+ }
+
+ /*
+- * Called by memory hotplug when all memory in a node is offlined.
++ * Called by memory hotplug when all memory in a node is offlined. Caller must
++ * hold lock_memory_hotplug().
+ */
+ void kswapd_stop(int nid)
+ {
+ struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
+
+- if (kswapd)
++ if (kswapd) {
+ kthread_stop(kswapd);
++ NODE_DATA(nid)->kswapd = NULL;
++ }
+ }
+
+ static int __init kswapd_init(void)
+diff --git a/net/can/raw.c b/net/can/raw.c
+index cde1b4a..46cca3a 100644
+--- a/net/can/raw.c
++++ b/net/can/raw.c
+@@ -681,9 +681,6 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
+ if (err < 0)
+ goto free_skb;
+
+- /* to be able to check the received tx sock reference in raw_rcv() */
+- skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
+-
+ skb->dev = dev;
+ skb->sk = sk;
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 1cbddc9..5738654 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2079,25 +2079,6 @@ static int dev_gso_segment(struct sk_buff *skb, int features)
+ return 0;
+ }
+
+-/*
+- * Try to orphan skb early, right before transmission by the device.
+- * We cannot orphan skb if tx timestamp is requested or the sk-reference
+- * is needed on driver level for other reasons, e.g. see net/can/raw.c
+- */
+-static inline void skb_orphan_try(struct sk_buff *skb)
+-{
+- struct sock *sk = skb->sk;
+-
+- if (sk && !skb_shinfo(skb)->tx_flags) {
+- /* skb_tx_hash() wont be able to get sk.
+- * We copy sk_hash into skb->rxhash
+- */
+- if (!skb->rxhash)
+- skb->rxhash = sk->sk_hash;
+- skb_orphan(skb);
+- }
+-}
+-
+ static bool can_checksum_protocol(unsigned long features, __be16 protocol)
+ {
+ return ((features & NETIF_F_GEN_CSUM) ||
+@@ -2182,8 +2163,6 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
+ if (!list_empty(&ptype_all))
+ dev_queue_xmit_nit(skb, dev);
+
+- skb_orphan_try(skb);
+-
+ features = netif_skb_features(skb);
+
+ if (vlan_tx_tag_present(skb) &&
+@@ -2293,7 +2272,7 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
+ if (skb->sk && skb->sk->sk_hash)
+ hash = skb->sk->sk_hash;
+ else
+- hash = (__force u16) skb->protocol ^ skb->rxhash;
++ hash = (__force u16) skb->protocol;
+ hash = jhash_1word(hash, hashrnd);
+
+ return (u16) (((u64) hash * qcount) >> 32) + qoffset;
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 9726927..32e6ca2 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -5836,6 +5836,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ goto discard;
+
+ if (th->syn) {
++ if (th->fin)
++ goto discard;
+ if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
+ return 1;
+
+diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
+index 274d150..cf98d62 100644
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -380,7 +380,6 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
+ skb_trim(skb, skb->dev->mtu);
+ }
+ skb->protocol = ETH_P_AF_IUCV;
+- skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ return -ENOMEM;
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index d38815d..74d5292 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -813,7 +813,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
+ ntype == NL80211_IFTYPE_P2P_CLIENT))
+ return -EBUSY;
+
+- if (ntype != otype) {
++ if (ntype != otype && netif_running(dev)) {
+ err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr,
+ ntype);
+ if (err)
+diff --git a/scripts/depmod.sh b/scripts/depmod.sh
+index a272356..2ae4817 100755
+--- a/scripts/depmod.sh
++++ b/scripts/depmod.sh
+@@ -9,12 +9,6 @@ fi
+ DEPMOD=$1
+ KERNELRELEASE=$2
+
+-if ! "$DEPMOD" -V 2>/dev/null | grep -q module-init-tools; then
+- echo "Warning: you may need to install module-init-tools" >&2
+- echo "See http://www.codemonkey.org.uk/docs/post-halloween-2.6.txt" >&2
+- sleep 1
+-fi
+-
+ if ! test -r System.map -a -x "$DEPMOD"; then
+ exit 0
+ fi
+diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
+index 9f614b4..272407c 100644
+--- a/virt/kvm/irq_comm.c
++++ b/virt/kvm/irq_comm.c
+@@ -318,6 +318,7 @@ static int setup_routing_entry(struct kvm_irq_routing_table *rt,
+ */
+ hlist_for_each_entry(ei, n, &rt->map[ue->gsi], link)
+ if (ei->type == KVM_IRQ_ROUTING_MSI ||
++ ue->type == KVM_IRQ_ROUTING_MSI ||
+ ue->u.irqchip.irqchip == ei->irqchip.irqchip)
+ return r;
+
diff --git a/3.2.23/4420_grsecurity-2.9.1-3.2.23-201207242236.patch b/3.2.24/4420_grsecurity-2.9.1-3.2.24-201207281946.patch
index acc5089..d960312 100644
--- a/3.2.23/4420_grsecurity-2.9.1-3.2.23-201207242236.patch
+++ b/3.2.24/4420_grsecurity-2.9.1-3.2.24-201207281946.patch
@@ -1,5 +1,5 @@
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
-index dfa6fc6..7afd8a1 100644
+index dfa6fc6..65f7dbe 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -2,9 +2,11 @@
@@ -22,7 +22,7 @@ index dfa6fc6..7afd8a1 100644
*.grep
*.grp
*.gz
-@@ -48,9 +51,11 @@
+@@ -48,14 +51,17 @@
*.tab.h
*.tex
*.ver
@@ -34,7 +34,14 @@ index dfa6fc6..7afd8a1 100644
*_vga16.c
*~
\#*#
-@@ -70,6 +75,7 @@ Kerntypes
+ *.9
+-.*
++.[^g]*
++.gen*
+ .*.d
+ .mm
+ 53c700_d.h
+@@ -70,6 +76,7 @@ Kerntypes
Module.markers
Module.symvers
PENDING
@@ -42,7 +49,7 @@ index dfa6fc6..7afd8a1 100644
SCCS
System.map*
TAGS
-@@ -81,6 +87,7 @@ aic7*seq.h*
+@@ -81,6 +88,7 @@ aic7*seq.h*
aicasm
aicdb.h*
altivec*.c
@@ -50,7 +57,7 @@ index dfa6fc6..7afd8a1 100644
asm-offsets.h
asm_offsets.h
autoconf.h*
-@@ -93,19 +100,24 @@ bounds.h
+@@ -93,19 +101,24 @@ bounds.h
bsetup
btfixupprep
build
@@ -75,7 +82,7 @@ index dfa6fc6..7afd8a1 100644
conmakehash
consolemap_deftbl.c*
cpustr.h
-@@ -116,9 +128,11 @@ devlist.h*
+@@ -116,9 +129,11 @@ devlist.h*
dnotify_test
docproc
dslm
@@ -87,7 +94,7 @@ index dfa6fc6..7afd8a1 100644
fixdep
flask.h
fore200e_mkfirm
-@@ -126,12 +140,15 @@ fore200e_pca_fw.c*
+@@ -126,12 +141,15 @@ fore200e_pca_fw.c*
gconf
gconf.glade.h
gen-devlist
@@ -103,7 +110,7 @@ index dfa6fc6..7afd8a1 100644
hpet_example
hugepage-mmap
hugepage-shm
-@@ -146,7 +163,7 @@ int32.c
+@@ -146,7 +164,7 @@ int32.c
int4.c
int8.c
kallsyms
@@ -112,7 +119,7 @@ index dfa6fc6..7afd8a1 100644
keywords.c
ksym.c*
ksym.h*
-@@ -154,7 +171,7 @@ kxgettext
+@@ -154,7 +172,7 @@ kxgettext
lkc_defs.h
lex.c
lex.*.c
@@ -121,7 +128,7 @@ index dfa6fc6..7afd8a1 100644
logo_*.c
logo_*_clut224.c
logo_*_mono.c
-@@ -166,14 +183,15 @@ machtypes.h
+@@ -166,14 +184,15 @@ machtypes.h
map
map_hugetlb
maui_boot.h
@@ -138,7 +145,7 @@ index dfa6fc6..7afd8a1 100644
mkprep
mkregtable
mktables
-@@ -209,6 +227,7 @@ r300_reg_safe.h
+@@ -209,6 +228,7 @@ r300_reg_safe.h
r420_reg_safe.h
r600_reg_safe.h
recordmcount
@@ -146,7 +153,7 @@ index dfa6fc6..7afd8a1 100644
relocs
rlim_names.h
rn50_reg_safe.h
-@@ -218,7 +237,9 @@ series
+@@ -218,7 +238,9 @@ series
setup
setup.bin
setup.elf
@@ -156,7 +163,7 @@ index dfa6fc6..7afd8a1 100644
sm_tbl*
split-include
syscalltab.h
-@@ -229,6 +250,7 @@ tftpboot.img
+@@ -229,6 +251,7 @@ tftpboot.img
timeconst.h
times.h*
trix_boot.h
@@ -164,7 +171,7 @@ index dfa6fc6..7afd8a1 100644
utsrelease.h*
vdso-syms.lds
vdso.lds
-@@ -246,7 +268,9 @@ vmlinux
+@@ -246,7 +269,9 @@ vmlinux
vmlinux-*
vmlinux.aout
vmlinux.bin.all
@@ -174,7 +181,7 @@ index dfa6fc6..7afd8a1 100644
vmlinuz
voffset.h
vsyscall.lds
-@@ -254,9 +278,11 @@ vsyscall_32.lds
+@@ -254,9 +279,11 @@ vsyscall_32.lds
wanxlfw.inc
uImage
unifdef
@@ -205,7 +212,7 @@ index 81c287f..d456d02 100644
pcd. [PARIDE]
diff --git a/Makefile b/Makefile
-index 40d1e3b..bf02dfb 100644
+index 80bb4fd..964ea28 100644
--- a/Makefile
+++ b/Makefile
@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -231,7 +238,7 @@ index 40d1e3b..bf02dfb 100644
$(Q)$(MAKE) $(build)=scripts/basic
$(Q)rm -f .tmp_quiet_recordmcount
-@@ -564,6 +565,56 @@ else
+@@ -564,6 +565,60 @@ else
KBUILD_CFLAGS += -O2
endif
@@ -264,10 +271,14 @@ index 40d1e3b..bf02dfb 100644
+ifdef CONFIG_PAX_SIZE_OVERFLOW
+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
+endif
++ifdef CONFIG_PAX_LATENT_ENTROPY
++LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
++endif
+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
-+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS) $(SIZE_OVERFLOW_PLUGIN_CFLAGS)
++GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
++GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS)
+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
-+export PLUGINCC CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN SIZE_OVERFLOW_PLUGIN
++export PLUGINCC CONSTIFY_PLUGIN
+ifeq ($(KBUILD_EXTMOD),)
+gcc-plugins:
+ $(Q)$(MAKE) $(build)=tools/gcc
@@ -288,7 +299,7 @@ index 40d1e3b..bf02dfb 100644
include $(srctree)/arch/$(SRCARCH)/Makefile
ifneq ($(CONFIG_FRAME_WARN),0)
-@@ -708,7 +759,7 @@ export mod_strip_cmd
+@@ -708,7 +763,7 @@ export mod_strip_cmd
ifeq ($(KBUILD_EXTMOD),)
@@ -297,7 +308,7 @@ index 40d1e3b..bf02dfb 100644
vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
$(core-y) $(core-m) $(drivers-y) $(drivers-m) \
-@@ -932,6 +983,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
+@@ -932,6 +987,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
# The actual objects are generated when descending,
# make sure no implicit rule kicks in
@@ -306,7 +317,7 @@ index 40d1e3b..bf02dfb 100644
$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
# Handle descending into subdirectories listed in $(vmlinux-dirs)
-@@ -941,7 +994,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
+@@ -941,7 +998,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
# Error messages still appears in the original language
PHONY += $(vmlinux-dirs)
@@ -315,7 +326,7 @@ index 40d1e3b..bf02dfb 100644
$(Q)$(MAKE) $(build)=$@
# Store (new) KERNELRELASE string in include/config/kernel.release
-@@ -985,6 +1038,7 @@ prepare0: archprepare FORCE
+@@ -985,6 +1042,7 @@ prepare0: archprepare FORCE
$(Q)$(MAKE) $(build)=.
# All the preparing..
@@ -323,7 +334,7 @@ index 40d1e3b..bf02dfb 100644
prepare: prepare0
# Generate some files
-@@ -1089,6 +1143,8 @@ all: modules
+@@ -1089,6 +1147,8 @@ all: modules
# using awk while concatenating to the final file.
PHONY += modules
@@ -332,7 +343,7 @@ index 40d1e3b..bf02dfb 100644
modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
$(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
@$(kecho) ' Building modules, stage 2.';
-@@ -1104,7 +1160,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
+@@ -1104,7 +1164,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
# Target to prepare building external modules
PHONY += modules_prepare
@@ -341,7 +352,7 @@ index 40d1e3b..bf02dfb 100644
# Target to install modules
PHONY += modules_install
-@@ -1163,7 +1219,7 @@ CLEAN_FILES += vmlinux System.map \
+@@ -1163,7 +1223,7 @@ CLEAN_FILES += vmlinux System.map \
MRPROPER_DIRS += include/config usr/include include/generated \
arch/*/include/generated
MRPROPER_FILES += .config .config.old .version .old_version \
@@ -350,7 +361,7 @@ index 40d1e3b..bf02dfb 100644
Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
# clean - Delete most, but leave enough to build external modules
-@@ -1201,6 +1257,7 @@ distclean: mrproper
+@@ -1201,6 +1261,7 @@ distclean: mrproper
\( -name '*.orig' -o -name '*.rej' -o -name '*~' \
-o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
-o -name '.*.rej' \
@@ -358,7 +369,7 @@ index 40d1e3b..bf02dfb 100644
-o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
-type f -print | xargs rm -f
-@@ -1361,6 +1418,8 @@ PHONY += $(module-dirs) modules
+@@ -1361,6 +1422,8 @@ PHONY += $(module-dirs) modules
$(module-dirs): crmodverdir $(objtree)/Module.symvers
$(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
@@ -367,7 +378,7 @@ index 40d1e3b..bf02dfb 100644
modules: $(module-dirs)
@$(kecho) ' Building modules, stage 2.';
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
-@@ -1487,17 +1546,21 @@ else
+@@ -1487,17 +1550,21 @@ else
target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
endif
@@ -393,7 +404,7 @@ index 40d1e3b..bf02dfb 100644
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
%.symtypes: %.c prepare scripts FORCE
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-@@ -1507,11 +1570,15 @@ endif
+@@ -1507,11 +1574,15 @@ endif
$(cmd_crmodverdir)
$(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
$(build)=$(build-dir)
@@ -2887,7 +2898,7 @@ index 6018c80..7c37203 100644
#endif /* _ASM_SYSTEM_H */
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
-index 97f8bf6..3986751 100644
+index adda036..e0f33bb 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -124,6 +124,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
@@ -6585,7 +6596,7 @@ index 301421c..e2535d1 100644
obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
obj-y += fault_$(BITS).o
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
-index 8023fd7..c8e89e9 100644
+index 8023fd7..bb71401 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -21,6 +21,9 @@
@@ -6598,7 +6609,7 @@ index 8023fd7..c8e89e9 100644
#include <asm/system.h>
#include <asm/page.h>
-@@ -208,6 +211,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
+@@ -208,6 +211,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
return safe_compute_effective_address(regs, insn);
}
@@ -6689,40 +6700,49 @@ index 8023fd7..c8e89e9 100644
+ }
+ } while (0);
+
-+ { /* PaX: patched PLT emulation #2 */
++ do { /* PaX: patched PLT emulation #2 */
+ unsigned int ba;
+
+ err = get_user(ba, (unsigned int *)regs->pc);
+
-+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
++ if (err)
++ break;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
+ unsigned int addr;
+
-+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++ else
++ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
+ regs->pc = addr;
+ regs->npc = addr+4;
+ return 2;
+ }
-+ }
++ } while (0);
+
+ do { /* PaX: patched PLT emulation #3 */
-+ unsigned int sethi, jmpl, nop;
++ unsigned int sethi, bajmpl, nop;
+
+ err = get_user(sethi, (unsigned int *)regs->pc);
-+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
++ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
+
+ if (err)
+ break;
+
+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
-+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
++ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
+ nop == 0x01000000U)
+ {
+ unsigned int addr;
+
+ addr = (sethi & 0x003FFFFFU) << 10;
+ regs->u_regs[UREG_G1] = addr;
-+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ else
++ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
+ regs->pc = addr;
+ regs->npc = addr+4;
+ return 2;
@@ -6867,7 +6887,7 @@ index 8023fd7..c8e89e9 100644
static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
int text_fault)
{
-@@ -280,6 +545,24 @@ good_area:
+@@ -280,6 +554,24 @@ good_area:
if(!(vma->vm_flags & VM_WRITE))
goto bad_area;
} else {
@@ -6893,7 +6913,7 @@ index 8023fd7..c8e89e9 100644
if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
-index 504c062..6fcb9c6 100644
+index 504c062..a383267 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -21,6 +21,9 @@
@@ -6915,7 +6935,7 @@ index 504c062..6fcb9c6 100644
printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
dump_stack();
unhandled_fault(regs->tpc, current, regs);
-@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
+@@ -272,6 +275,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
show_regs(regs);
}
@@ -7010,15 +7030,21 @@ index 504c062..6fcb9c6 100644
+ }
+ } while (0);
+
-+ { /* PaX: patched PLT emulation #2 */
++ do { /* PaX: patched PLT emulation #2 */
+ unsigned int ba;
+
+ err = get_user(ba, (unsigned int *)regs->tpc);
+
-+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
++ if (err)
++ break;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
+ unsigned long addr;
+
-+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++ else
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
+
+ if (test_thread_flag(TIF_32BIT))
+ addr &= 0xFFFFFFFFUL;
@@ -7027,27 +7053,30 @@ index 504c062..6fcb9c6 100644
+ regs->tnpc = addr+4;
+ return 2;
+ }
-+ }
++ } while (0);
+
+ do { /* PaX: patched PLT emulation #3 */
-+ unsigned int sethi, jmpl, nop;
++ unsigned int sethi, bajmpl, nop;
+
+ err = get_user(sethi, (unsigned int *)regs->tpc);
-+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
++ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
+
+ if (err)
+ break;
+
+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
-+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
++ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
+ nop == 0x01000000U)
+ {
+ unsigned long addr;
+
+ addr = (sethi & 0x003FFFFFU) << 10;
+ regs->u_regs[UREG_G1] = addr;
-+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
++ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++ else
++ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
+
+ if (test_thread_flag(TIF_32BIT))
+ addr &= 0xFFFFFFFFUL;
@@ -7373,7 +7402,7 @@ index 504c062..6fcb9c6 100644
asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
{
struct mm_struct *mm = current->mm;
-@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
+@@ -340,6 +803,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
if (!vma)
goto bad_area;
@@ -9152,7 +9181,7 @@ index 20370c6..a2eb9b0 100644
"popl %%ebp\n\t"
"popl %%edi\n\t"
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
-index 58cb6d4..ca9010d 100644
+index 58cb6d4..a4b806c 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -22,7 +22,18 @@
@@ -9560,6 +9589,51 @@ index 58cb6d4..ca9010d 100644
/*
* atomic_dec_if_positive - decrement by 1 if old value positive
+@@ -293,14 +552,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
+ #endif
+
+ /* These are x86-specific, used by some header files */
+-#define atomic_clear_mask(mask, addr) \
+- asm volatile(LOCK_PREFIX "andl %0,%1" \
+- : : "r" (~(mask)), "m" (*(addr)) : "memory")
++static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
++{
++ asm volatile(LOCK_PREFIX "andl %1,%0"
++ : "+m" (v->counter)
++ : "r" (~(mask))
++ : "memory");
++}
+
+-#define atomic_set_mask(mask, addr) \
+- asm volatile(LOCK_PREFIX "orl %0,%1" \
+- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
+- : "memory")
++static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "andl %1,%0"
++ : "+m" (v->counter)
++ : "r" (~(mask))
++ : "memory");
++}
++
++static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
++{
++ asm volatile(LOCK_PREFIX "orl %1,%0"
++ : "+m" (v->counter)
++ : "r" (mask)
++ : "memory");
++}
++
++static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "orl %1,%0"
++ : "+m" (v->counter)
++ : "r" (mask)
++ : "memory");
++}
+
+ /* Atomic operations are already serializing on x86 */
+ #define smp_mb__before_atomic_dec() barrier()
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 24098aa..1e37723 100644
--- a/arch/x86/include/asm/atomic64_32.h
@@ -18752,7 +18826,7 @@ index 42eb330..139955c 100644
return ret;
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
-index 37a458b..e63d183 100644
+index e61f79c..bbbaa4d 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
@@ -18835,7 +18909,7 @@ index 37a458b..e63d183 100644
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(machine_real_restart);
-@@ -540,7 +570,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
+@@ -548,7 +578,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
* try to force a triple fault and then cycle between hitting the keyboard
* controller and doing that
*/
@@ -18844,7 +18918,7 @@ index 37a458b..e63d183 100644
{
int i;
int attempt = 0;
-@@ -664,13 +694,13 @@ void native_machine_shutdown(void)
+@@ -672,13 +702,13 @@ void native_machine_shutdown(void)
#endif
}
@@ -18860,7 +18934,7 @@ index 37a458b..e63d183 100644
{
printk("machine restart\n");
-@@ -679,7 +709,7 @@ static void native_machine_restart(char *__unused)
+@@ -687,7 +717,7 @@ static void native_machine_restart(char *__unused)
__machine_emergency_restart(0);
}
@@ -18869,7 +18943,7 @@ index 37a458b..e63d183 100644
{
/* stop other cpus and apics */
machine_shutdown();
-@@ -690,7 +720,7 @@ static void native_machine_halt(void)
+@@ -698,7 +728,7 @@ static void native_machine_halt(void)
stop_this_cpu(NULL);
}
@@ -18878,7 +18952,7 @@ index 37a458b..e63d183 100644
{
if (pm_power_off) {
if (!reboot_force)
-@@ -699,6 +729,7 @@ static void native_machine_power_off(void)
+@@ -707,6 +737,7 @@ static void native_machine_power_off(void)
}
/* a fallback in case there is no PM info available */
tboot_shutdown(TB_SHUTDOWN_HALT);
@@ -27506,7 +27580,7 @@ index 7b72502..646105c 100644
err = -EFAULT;
goto out;
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
-index 688be8a..8a37d98 100644
+index 9e76a32..48d7145 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
@@ -29562,7 +29636,7 @@ index 1aeaaba..e018570 100644
.part_num = MBCS_PART_NUM,
.mfg_num = MBCS_MFG_NUM,
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
-index 1451790..f705c30 100644
+index 1451790..3c7dfbb 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -18,6 +18,7 @@
@@ -29624,7 +29698,7 @@ index 1451790..f705c30 100644
- remaining = copy_to_user(buf, ptr, sz);
+#ifdef CONFIG_PAX_USERCOPY
-+ temp = kmalloc(sz, GFP_KERNEL);
++ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
+ if (!temp) {
+ unxlate_dev_mem_ptr(p, ptr);
+ return -ENOMEM;
@@ -29669,7 +29743,7 @@ index 1451790..f705c30 100644
- if (copy_to_user(buf, kbuf, sz))
+#ifdef CONFIG_PAX_USERCOPY
-+ temp = kmalloc(sz, GFP_KERNEL);
++ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
+ if (!temp)
+ return -ENOMEM;
+ memcpy(temp, kbuf, sz);
@@ -29711,7 +29785,7 @@ index da3cfee..a5a6606 100644
*ppos = i;
diff --git a/drivers/char/random.c b/drivers/char/random.c
-index 6035ab8..d45e4d4 100644
+index 6035ab8..c7e4a44 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -261,8 +261,13 @@
@@ -29746,7 +29820,25 @@ index 6035ab8..d45e4d4 100644
#if 0
/* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
{ 2048, 1638, 1231, 819, 411, 1 },
-@@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
+@@ -722,6 +734,17 @@ void add_disk_randomness(struct gendisk *disk)
+ }
+ #endif
+
++#ifdef CONFIG_PAX_LATENT_ENTROPY
++u64 latent_entropy;
++
++__init void transfer_latent_entropy(void)
++{
++ mix_pool_bytes(&input_pool, &latent_entropy, sizeof(latent_entropy));
++ mix_pool_bytes(&nonblocking_pool, &latent_entropy, sizeof(latent_entropy));
++// printk(KERN_INFO "PAX: transferring latent entropy: %16llx\n", latent_entropy);
++}
++#endif
++
+ /*********************************************************************
+ *
+ * Entropy extraction routines
+@@ -909,7 +932,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
extract_buf(r, tmp);
i = min_t(int, nbytes, EXTRACT_SIZE);
@@ -29755,7 +29847,7 @@ index 6035ab8..d45e4d4 100644
ret = -EFAULT;
break;
}
-@@ -1228,7 +1240,7 @@ EXPORT_SYMBOL(generate_random_uuid);
+@@ -1228,7 +1251,7 @@ EXPORT_SYMBOL(generate_random_uuid);
#include <linux/sysctl.h>
static int min_read_thresh = 8, min_write_thresh;
@@ -29764,7 +29856,7 @@ index 6035ab8..d45e4d4 100644
static int max_write_thresh = INPUT_POOL_WORDS * 32;
static char sysctl_bootid[16];
-@@ -1250,10 +1262,15 @@ static int proc_do_uuid(ctl_table *table, int write,
+@@ -1250,10 +1273,15 @@ static int proc_do_uuid(ctl_table *table, int write,
uuid = table->data;
if (!uuid) {
uuid = tmp_uuid;
@@ -30880,7 +30972,7 @@ index 578ddfc..86ac0d0 100644
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->error_work, i915_error_work_func);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
-index 6aa7716..8e5a304 100644
+index cc75c4b..4542065 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2196,7 +2196,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
@@ -30901,16 +30993,19 @@ index 6aa7716..8e5a304 100644
}
static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
-@@ -6982,7 +6982,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
+@@ -6980,9 +6980,8 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
+
+ obj = work->old_fb_obj;
- atomic_clear_mask(1 << intel_crtc->plane,
- &obj->pending_flip.counter);
+- atomic_clear_mask(1 << intel_crtc->plane,
+- &obj->pending_flip.counter);
- if (atomic_read(&obj->pending_flip) == 0)
++ atomic_clear_mask_unchecked(1 << intel_crtc->plane, &obj->pending_flip);
+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
wake_up(&dev_priv->pending_flip_queue);
schedule_work(&work->work);
-@@ -7177,7 +7177,13 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
+@@ -7177,7 +7176,13 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
OUT_RING(fb->pitch | obj->tiling_mode);
OUT_RING(obj->gtt_offset);
@@ -30925,7 +31020,7 @@ index 6aa7716..8e5a304 100644
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
OUT_RING(pf | pipesrc);
ADVANCE_LP_RING();
-@@ -7309,7 +7315,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+@@ -7309,7 +7314,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
/* Block clients from rendering to the new back buffer until
* the flip occurs and the object is no longer visible.
*/
@@ -30934,7 +31029,7 @@ index 6aa7716..8e5a304 100644
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
if (ret)
-@@ -7323,7 +7329,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+@@ -7323,7 +7328,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return 0;
cleanup_pending:
@@ -31617,10 +31712,10 @@ index 8a8725c..afed796 100644
marker = list_first_entry(&queue->head,
struct vmw_marker, head);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
-index c27b402..353115a 100644
+index 95430a0..1a65ca9 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
-@@ -2013,7 +2013,7 @@ static bool hid_ignore(struct hid_device *hdev)
+@@ -2020,7 +2020,7 @@ static bool hid_ignore(struct hid_device *hdev)
int hid_add_device(struct hid_device *hdev)
{
@@ -31629,7 +31724,7 @@ index c27b402..353115a 100644
int ret;
if (WARN_ON(hdev->status & HID_STAT_ADDED))
-@@ -2028,7 +2028,7 @@ int hid_add_device(struct hid_device *hdev)
+@@ -2035,7 +2035,7 @@ int hid_add_device(struct hid_device *hdev)
/* XXX hack, any other cleaner solution after the driver core
* is converted to allow more than 20 bytes as the device name? */
dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
@@ -33125,10 +33220,10 @@ index b8d8611..7a4a04b 100644
#include <linux/input.h>
#include <linux/gameport.h>
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
-index d728875..844c89b 100644
+index 2189cbf..05ad609 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
-@@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
+@@ -714,7 +714,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
static int xpad_led_probe(struct usb_xpad *xpad)
{
@@ -33137,7 +33232,7 @@ index d728875..844c89b 100644
long led_no;
struct xpad_led *led;
struct led_classdev *led_cdev;
-@@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
+@@ -727,7 +727,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
if (!led)
return -ENOMEM;
@@ -33612,7 +33707,7 @@ index 1f23e04..08d9a20 100644
spin_lock(&receiving_list_lock);
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
-index 9bfd057..01180bc 100644
+index dae2b7a..5c50c7e 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -40,7 +40,7 @@ enum dm_raid1_error {
@@ -33678,7 +33773,7 @@ index 9bfd057..01180bc 100644
ms->mirror[mirror].error_type = 0;
ms->mirror[mirror].offset = offset;
-@@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_target *ti)
+@@ -1348,7 +1348,7 @@ static void mirror_resume(struct dm_target *ti)
*/
static char device_status_char(struct mirror *m)
{
@@ -33823,7 +33918,7 @@ index 4720f68..78d1df7 100644
void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
diff --git a/drivers/md/md.c b/drivers/md/md.c
-index 700ecae..8122a9c 100644
+index d8646d7..8122a9c 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -278,10 +278,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
@@ -33895,125 +33990,7 @@ index 700ecae..8122a9c 100644
INIT_LIST_HEAD(&rdev->same_set);
init_waitqueue_head(&rdev->blocked_wait);
-@@ -3700,8 +3700,8 @@ array_state_show(struct mddev *mddev, char *page)
- return sprintf(page, "%s\n", array_states[st]);
- }
-
--static int do_md_stop(struct mddev * mddev, int ro, int is_open);
--static int md_set_readonly(struct mddev * mddev, int is_open);
-+static int do_md_stop(struct mddev * mddev, int ro, struct block_device *bdev);
-+static int md_set_readonly(struct mddev * mddev, struct block_device *bdev);
- static int do_md_run(struct mddev * mddev);
- static int restart_array(struct mddev *mddev);
-
-@@ -3717,14 +3717,14 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
- /* stopping an active array */
- if (atomic_read(&mddev->openers) > 0)
- return -EBUSY;
-- err = do_md_stop(mddev, 0, 0);
-+ err = do_md_stop(mddev, 0, NULL);
- break;
- case inactive:
- /* stopping an active array */
- if (mddev->pers) {
- if (atomic_read(&mddev->openers) > 0)
- return -EBUSY;
-- err = do_md_stop(mddev, 2, 0);
-+ err = do_md_stop(mddev, 2, NULL);
- } else
- err = 0; /* already inactive */
- break;
-@@ -3732,7 +3732,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
- break; /* not supported yet */
- case readonly:
- if (mddev->pers)
-- err = md_set_readonly(mddev, 0);
-+ err = md_set_readonly(mddev, NULL);
- else {
- mddev->ro = 1;
- set_disk_ro(mddev->gendisk, 1);
-@@ -3742,7 +3742,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
- case read_auto:
- if (mddev->pers) {
- if (mddev->ro == 0)
-- err = md_set_readonly(mddev, 0);
-+ err = md_set_readonly(mddev, NULL);
- else if (mddev->ro == 1)
- err = restart_array(mddev);
- if (err == 0) {
-@@ -5078,15 +5078,17 @@ void md_stop(struct mddev *mddev)
- }
- EXPORT_SYMBOL_GPL(md_stop);
-
--static int md_set_readonly(struct mddev *mddev, int is_open)
-+static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
- {
- int err = 0;
- mutex_lock(&mddev->open_mutex);
-- if (atomic_read(&mddev->openers) > is_open) {
-+ if (atomic_read(&mddev->openers) > !!bdev) {
- printk("md: %s still in use.\n",mdname(mddev));
- err = -EBUSY;
- goto out;
- }
-+ if (bdev)
-+ sync_blockdev(bdev);
- if (mddev->pers) {
- __md_stop_writes(mddev);
-
-@@ -5108,18 +5110,26 @@ out:
- * 0 - completely stop and dis-assemble array
- * 2 - stop but do not disassemble array
- */
--static int do_md_stop(struct mddev * mddev, int mode, int is_open)
-+static int do_md_stop(struct mddev * mddev, int mode,
-+ struct block_device *bdev)
- {
- struct gendisk *disk = mddev->gendisk;
- struct md_rdev *rdev;
-
- mutex_lock(&mddev->open_mutex);
-- if (atomic_read(&mddev->openers) > is_open ||
-+ if (atomic_read(&mddev->openers) > !!bdev ||
- mddev->sysfs_active) {
- printk("md: %s still in use.\n",mdname(mddev));
- mutex_unlock(&mddev->open_mutex);
- return -EBUSY;
- }
-+ if (bdev)
-+ /* It is possible IO was issued on some other
-+ * open file which was closed before we took ->open_mutex.
-+ * As that was not the last close __blkdev_put will not
-+ * have called sync_blockdev, so we must.
-+ */
-+ sync_blockdev(bdev);
-
- if (mddev->pers) {
- if (mddev->ro)
-@@ -5193,7 +5203,7 @@ static void autorun_array(struct mddev *mddev)
- err = do_md_run(mddev);
- if (err) {
- printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
-- do_md_stop(mddev, 0, 0);
-+ do_md_stop(mddev, 0, NULL);
- }
- }
-
-@@ -6184,11 +6194,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
- goto done_unlock;
-
- case STOP_ARRAY:
-- err = do_md_stop(mddev, 0, 1);
-+ err = do_md_stop(mddev, 0, bdev);
- goto done_unlock;
-
- case STOP_ARRAY_RO:
-- err = md_set_readonly(mddev, 1);
-+ err = md_set_readonly(mddev, bdev);
- goto done_unlock;
-
- case BLKROSET:
-@@ -6686,7 +6696,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
+@@ -6696,7 +6696,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
spin_unlock(&pers_lock);
seq_printf(seq, "\n");
@@ -34022,7 +33999,7 @@ index 700ecae..8122a9c 100644
return 0;
}
if (v == (void*)2) {
-@@ -6775,7 +6785,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
+@@ -6785,7 +6785,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
chunk_kb ? "KB" : "B");
if (bitmap->file) {
seq_printf(seq, ", file: ");
@@ -34031,7 +34008,7 @@ index 700ecae..8122a9c 100644
}
seq_printf(seq, "\n");
-@@ -6806,7 +6816,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
+@@ -6816,7 +6816,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
return error;
seq = file->private_data;
@@ -34040,7 +34017,7 @@ index 700ecae..8122a9c 100644
return error;
}
-@@ -6820,7 +6830,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
+@@ -6830,7 +6830,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
/* always allow read */
mask = POLLIN | POLLRDNORM;
@@ -34049,7 +34026,7 @@ index 700ecae..8122a9c 100644
mask |= POLLERR | POLLPRI;
return mask;
}
-@@ -6864,7 +6874,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
+@@ -6874,7 +6874,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
(int)part_stat_read(&disk->part0, sectors[1]) -
@@ -34139,7 +34116,7 @@ index 1cbfc6b..56e1dbb 100644
/*----------------------------------------------------------------*/
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
-index 7af60ec..10a4a5d 100644
+index 2d97bf0..5caa9cf 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1581,7 +1581,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
@@ -34151,7 +34128,7 @@ index 7af60ec..10a4a5d 100644
}
sectors -= s;
sect += s;
-@@ -1794,7 +1794,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
+@@ -1800,7 +1800,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
test_bit(In_sync, &rdev->flags)) {
if (r1_sync_page_io(rdev, sect, s,
conf->tmppage, READ)) {
@@ -34224,10 +34201,10 @@ index 7a9eef6..707cb03 100644
rdev_dec_pending(rdev, mddev);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
-index 6ba4954..5ce122a 100644
+index 26ef63a..bd587cd 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
-@@ -1616,19 +1616,19 @@ static void raid5_end_read_request(struct bio * bi, int error)
+@@ -1618,19 +1618,19 @@ static void raid5_end_read_request(struct bio * bi, int error)
(unsigned long long)(sh->sector
+ rdev->data_offset),
bdevname(rdev->bdev, b));
@@ -34251,7 +34228,7 @@ index 6ba4954..5ce122a 100644
if (conf->mddev->degraded >= conf->max_degraded)
printk_ratelimited(
KERN_WARNING
-@@ -1648,7 +1648,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
+@@ -1650,7 +1650,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
(unsigned long long)(sh->sector
+ rdev->data_offset),
bdn);
@@ -34287,7 +34264,7 @@ index a7d876f..8c21b61 100644
struct dvb_demux *demux;
void *priv;
diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
-index f732877..d38c35a 100644
+index d5cda35..017af46 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.c
+++ b/drivers/media/dvb/dvb-core/dvbdev.c
@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
@@ -35354,7 +35331,7 @@ index e1159e5..e18684d 100644
/* Set media type */
switch (adapter->pdev->device) {
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
-index e556fc3..fa9199d 100644
+index 3072d35..a0f4827 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
@@ -35708,7 +35685,7 @@ index 4a518a3..936b334 100644
#define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
-index cc2565c..7325c3c 100644
+index 9e61d6b..852f305 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -702,17 +702,17 @@ struct rtl8169_private {
@@ -35761,10 +35738,10 @@ index c07cfe9..81cbf7e 100644
/* To mask all all interrupts.*/
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
-index 72cd190..fcf7fb3 100644
+index d4d2bc1..14b8672 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
-@@ -1599,7 +1599,7 @@ static const struct file_operations stmmac_rings_status_fops = {
+@@ -1602,7 +1602,7 @@ static const struct file_operations stmmac_rings_status_fops = {
.open = stmmac_sysfs_ring_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -35773,7 +35750,7 @@ index 72cd190..fcf7fb3 100644
};
static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
-@@ -1671,7 +1671,7 @@ static const struct file_operations stmmac_dma_cap_fops = {
+@@ -1674,7 +1674,7 @@ static const struct file_operations stmmac_dma_cap_fops = {
.open = stmmac_sysfs_dma_cap_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -35782,19 +35759,6 @@ index 72cd190..fcf7fb3 100644
};
static int stmmac_init_fs(struct net_device *dev)
-diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
-index 1b7082d..c786773 100644
---- a/drivers/net/macvtap.c
-+++ b/drivers/net/macvtap.c
-@@ -526,6 +526,8 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
- }
- base = (unsigned long)from->iov_base + offset1;
- size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
-+ if (i + size >= MAX_SKB_FRAGS)
-+ return -EFAULT;
- num_pages = get_user_pages_fast(base, size, 0, &page[i]);
- if ((num_pages != size) ||
- (num_pages > MAX_SKB_FRAGS - skb_shinfo(skb)->nr_frags))
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 3ed983c..a1bb418 100644
--- a/drivers/net/ppp/ppp_generic.c
@@ -37337,7 +37301,7 @@ index 9de9db2..1e09660 100644
fc_frame_free(fp);
}
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
-index db9238f..4378ed2 100644
+index 4868fc9..7f2e028 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
@@ -38323,51 +38287,6 @@ index 0842cc7..61d886d 100644
if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
if (--cmd->outstanding_r2ts < 1) {
iscsit_stop_dataout_timer(cmd);
-diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
-index 65ea65a..93b9406 100644
---- a/drivers/target/target_core_cdb.c
-+++ b/drivers/target/target_core_cdb.c
-@@ -1199,7 +1199,7 @@ int target_emulate_write_same(struct se_task *task)
- if (num_blocks != 0)
- range = num_blocks;
- else
-- range = (dev->transport->get_blocks(dev) - lba);
-+ range = (dev->transport->get_blocks(dev) - lba) + 1;
-
- pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
- (unsigned long long)lba, (unsigned long long)range);
-diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
-index b75bc92..9145141 100644
---- a/drivers/target/target_core_pr.c
-+++ b/drivers/target/target_core_pr.c
-@@ -2042,7 +2042,7 @@ static int __core_scsi3_write_aptpl_to_file(
- if (IS_ERR(file) || !file || !file->f_dentry) {
- pr_err("filp_open(%s) for APTPL metadata"
- " failed\n", path);
-- return (PTR_ERR(file) < 0 ? PTR_ERR(file) : -ENOENT);
-+ return IS_ERR(file) ? PTR_ERR(file) : -ENOENT;
- }
-
- iov[0].iov_base = &buf[0];
-@@ -3853,7 +3853,7 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
- " SPC-2 reservation is held, returning"
- " RESERVATION_CONFLICT\n");
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
-- ret = EINVAL;
-+ ret = -EINVAL;
- goto out;
- }
-
-@@ -3863,7 +3863,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
- */
- if (!cmd->se_sess) {
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-- return -EINVAL;
-+ ret = -EINVAL;
-+ goto out;
- }
-
- if (cmd->data_length < 24) {
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 6845228..df77141 100644
--- a/drivers/target/target_core_tmr.c
@@ -38469,19 +38388,6 @@ index 5660916..f6dab21 100644
smp_mb__after_atomic_inc();
}
}
-diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
-index d95cfe2..278819c 100644
---- a/drivers/target/tcm_fc/tfc_cmd.c
-+++ b/drivers/target/tcm_fc/tfc_cmd.c
-@@ -249,6 +249,8 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd)
- {
- struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
-
-+ if (cmd->aborted)
-+ return ~0;
- return fc_seq_exch(cmd->seq)->rxid;
- }
-
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
index b9040be..e3f5aab 100644
--- a/drivers/tty/hvc/hvcs.c
@@ -39297,10 +39203,10 @@ index 57c01ab..8a05959 100644
/*
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
-index c14c42b..f955cc2 100644
+index ae66278..579de88b 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
-@@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
+@@ -631,7 +631,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
return 0;
}
@@ -42731,7 +42637,7 @@ index a6395bd..f1e376a 100644
(unsigned long) create_aout_tables((char __user *) bprm->p, bprm);
#ifdef __alpha__
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
-index 6ff96c6..3020df9 100644
+index 6ff96c6..566204e 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -32,6 +32,7 @@
@@ -42862,7 +42768,7 @@ index 6ff96c6..3020df9 100644
error = -ENOMEM;
goto out_close;
}
-@@ -528,6 +552,349 @@ out:
+@@ -528,6 +552,311 @@ out:
return error;
}
@@ -42882,15 +42788,6 @@ index 6ff96c6..3020df9 100644
+ pax_flags |= MF_PAX_SEGMEXEC;
+#endif
+
-+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
-+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
-+ if ((__supported_pte_mask & _PAGE_NX))
-+ pax_flags &= ~MF_PAX_SEGMEXEC;
-+ else
-+ pax_flags &= ~MF_PAX_PAGEEXEC;
-+ }
-+#endif
-+
+#ifdef CONFIG_PAX_EMUTRAMP
+ if (elf_phdata->p_flags & PF_EMUTRAMP)
+ pax_flags |= MF_PAX_EMUTRAMP;
@@ -42924,15 +42821,6 @@ index 6ff96c6..3020df9 100644
+ pax_flags |= MF_PAX_SEGMEXEC;
+#endif
+
-+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
-+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
-+ if ((__supported_pte_mask & _PAGE_NX))
-+ pax_flags &= ~MF_PAX_SEGMEXEC;
-+ else
-+ pax_flags &= ~MF_PAX_PAGEEXEC;
-+ }
-+#endif
-+
+#ifdef CONFIG_PAX_EMUTRAMP
+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
+ pax_flags |= MF_PAX_EMUTRAMP;
@@ -42968,15 +42856,6 @@ index 6ff96c6..3020df9 100644
+ pax_flags |= MF_PAX_SEGMEXEC;
+#endif
+
-+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
-+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
-+ if ((__supported_pte_mask & _PAGE_NX))
-+ pax_flags &= ~MF_PAX_SEGMEXEC;
-+ else
-+ pax_flags &= ~MF_PAX_PAGEEXEC;
-+ }
-+#endif
-+
+#ifdef CONFIG_PAX_EMUTRAMP
+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
+ pax_flags |= MF_PAX_EMUTRAMP;
@@ -43010,15 +42889,6 @@ index 6ff96c6..3020df9 100644
+ pax_flags |= MF_PAX_SEGMEXEC;
+#endif
+
-+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
-+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
-+ if ((__supported_pte_mask & _PAGE_NX))
-+ pax_flags &= ~MF_PAX_SEGMEXEC;
-+ else
-+ pax_flags &= ~MF_PAX_PAGEEXEC;
-+ }
-+#endif
-+
+#ifdef CONFIG_PAX_EMUTRAMP
+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
+ pax_flags |= MF_PAX_EMUTRAMP;
@@ -43038,7 +42908,7 @@ index 6ff96c6..3020df9 100644
+}
+#endif
+
-+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
+{
+ unsigned long pax_flags = 0UL;
@@ -43055,15 +42925,6 @@ index 6ff96c6..3020df9 100644
+ pax_flags |= MF_PAX_SEGMEXEC;
+#endif
+
-+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
-+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
-+ if ((__supported_pte_mask & _PAGE_NX))
-+ pax_flags &= ~MF_PAX_SEGMEXEC;
-+ else
-+ pax_flags &= ~MF_PAX_PAGEEXEC;
-+ }
-+#endif
-+
+#ifdef CONFIG_PAX_EMUTRAMP
+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
+ pax_flags |= MF_PAX_EMUTRAMP;
@@ -43085,19 +42946,17 @@ index 6ff96c6..3020df9 100644
+ pax_flags |= MF_PAX_PAGEEXEC;
+#endif
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
+#ifdef CONFIG_PAX_MPROTECT
+ pax_flags |= MF_PAX_MPROTECT;
+#endif
+
+#ifdef CONFIG_PAX_RANDMMAP
-+ pax_flags |= MF_PAX_RANDMMAP;
-+#endif
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
-+ pax_flags &= ~MF_PAX_PAGEEXEC;
-+ pax_flags |= MF_PAX_SEGMEXEC;
-+ }
++ if (randomize_va_space)
++ pax_flags |= MF_PAX_RANDMMAP;
+#endif
+
+#endif
@@ -43201,6 +43060,15 @@ index 6ff96c6..3020df9 100644
+ if (pt_pax_flags != ~0UL)
+ pax_flags = pt_pax_flags;
+
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ if ((__supported_pte_mask & _PAGE_NX))
++ pax_flags &= ~MF_PAX_SEGMEXEC;
++ else
++ pax_flags &= ~MF_PAX_PAGEEXEC;
++ }
++#endif
++
+ if (0 > pax_check_flags(&pax_flags))
+ return -EINVAL;
+
@@ -43212,7 +43080,7 @@ index 6ff96c6..3020df9 100644
/*
* These are the functions used to load ELF style executables and shared
* libraries. There is no binary dependent code anywhere else.
-@@ -544,6 +911,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
+@@ -544,6 +873,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
{
unsigned int random_variable = 0;
@@ -43224,7 +43092,7 @@ index 6ff96c6..3020df9 100644
if ((current->flags & PF_RANDOMIZE) &&
!(current->personality & ADDR_NO_RANDOMIZE)) {
random_variable = get_random_int() & STACK_RND_MASK;
-@@ -562,7 +934,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -562,7 +896,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
unsigned long load_addr = 0, load_bias = 0;
int load_addr_set = 0;
char * elf_interpreter = NULL;
@@ -43233,7 +43101,7 @@ index 6ff96c6..3020df9 100644
struct elf_phdr *elf_ppnt, *elf_phdata;
unsigned long elf_bss, elf_brk;
int retval, i;
-@@ -572,11 +944,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -572,11 +906,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
unsigned long start_code, end_code, start_data, end_data;
unsigned long reloc_func_desc __maybe_unused = 0;
int executable_stack = EXSTACK_DEFAULT;
@@ -43246,7 +43114,7 @@ index 6ff96c6..3020df9 100644
loc = kmalloc(sizeof(*loc), GFP_KERNEL);
if (!loc) {
-@@ -713,11 +1085,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -713,11 +1047,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
/* OK, This is the point of no return */
current->flags &= ~PF_FORKNOEXEC;
@@ -43271,7 +43139,7 @@ index 6ff96c6..3020df9 100644
+
+ current->mm->def_flags = 0;
+
-+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
+ send_sig(SIGKILL, current, 0);
+ goto out_free_dentry;
@@ -43329,7 +43197,7 @@ index 6ff96c6..3020df9 100644
if (elf_read_implies_exec(loc->elf_ex, executable_stack))
current->personality |= READ_IMPLIES_EXEC;
-@@ -808,6 +1250,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -808,6 +1212,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
#else
load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
#endif
@@ -43350,7 +43218,7 @@ index 6ff96c6..3020df9 100644
}
error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
-@@ -840,9 +1296,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -840,9 +1258,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
* allowed task size. Note that p_filesz must always be
* <= p_memsz so it is only necessary to check p_memsz.
*/
@@ -43363,7 +43231,7 @@ index 6ff96c6..3020df9 100644
/* set_brk can never work. Avoid overflows. */
send_sig(SIGKILL, current, 0);
retval = -EINVAL;
-@@ -881,11 +1337,40 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -881,11 +1299,40 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
goto out_free_dentry;
}
if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
@@ -43407,7 +43275,7 @@ index 6ff96c6..3020df9 100644
if (elf_interpreter) {
unsigned long uninitialized_var(interp_map_addr);
-@@ -1098,7 +1583,7 @@ out:
+@@ -1098,7 +1545,7 @@ out:
* Decide what to dump of a segment, part, all or none.
*/
static unsigned long vma_dump_size(struct vm_area_struct *vma,
@@ -43416,7 +43284,7 @@ index 6ff96c6..3020df9 100644
{
#define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
-@@ -1132,7 +1617,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
+@@ -1132,7 +1579,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
if (vma->vm_file == NULL)
return 0;
@@ -43425,7 +43293,7 @@ index 6ff96c6..3020df9 100644
goto whole;
/*
-@@ -1354,9 +1839,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
+@@ -1354,9 +1801,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
{
elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
int i = 0;
@@ -43437,7 +43305,7 @@ index 6ff96c6..3020df9 100644
fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
}
-@@ -1862,14 +2347,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
+@@ -1862,14 +2309,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
}
static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
@@ -43454,7 +43322,7 @@ index 6ff96c6..3020df9 100644
return size;
}
-@@ -1963,7 +2448,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -1963,7 +2410,7 @@ static int elf_core_dump(struct coredump_params *cprm)
dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
@@ -43463,7 +43331,7 @@ index 6ff96c6..3020df9 100644
offset += elf_core_extra_data_size();
e_shoff = offset;
-@@ -1977,10 +2462,12 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -1977,10 +2424,12 @@ static int elf_core_dump(struct coredump_params *cprm)
offset = dataoff;
size += sizeof(*elf);
@@ -43476,7 +43344,7 @@ index 6ff96c6..3020df9 100644
if (size > cprm->limit
|| !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
goto end_coredump;
-@@ -1994,7 +2481,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -1994,7 +2443,7 @@ static int elf_core_dump(struct coredump_params *cprm)
phdr.p_offset = offset;
phdr.p_vaddr = vma->vm_start;
phdr.p_paddr = 0;
@@ -43485,7 +43353,7 @@ index 6ff96c6..3020df9 100644
phdr.p_memsz = vma->vm_end - vma->vm_start;
offset += phdr.p_filesz;
phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
-@@ -2005,6 +2492,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2005,6 +2454,7 @@ static int elf_core_dump(struct coredump_params *cprm)
phdr.p_align = ELF_EXEC_PAGESIZE;
size += sizeof(phdr);
@@ -43493,7 +43361,7 @@ index 6ff96c6..3020df9 100644
if (size > cprm->limit
|| !dump_write(cprm->file, &phdr, sizeof(phdr)))
goto end_coredump;
-@@ -2029,7 +2517,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2029,7 +2479,7 @@ static int elf_core_dump(struct coredump_params *cprm)
unsigned long addr;
unsigned long end;
@@ -43502,7 +43370,7 @@ index 6ff96c6..3020df9 100644
for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
struct page *page;
-@@ -2038,6 +2526,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2038,6 +2488,7 @@ static int elf_core_dump(struct coredump_params *cprm)
page = get_dump_page(addr);
if (page) {
void *kaddr = kmap(page);
@@ -43510,7 +43378,7 @@ index 6ff96c6..3020df9 100644
stop = ((size += PAGE_SIZE) > cprm->limit) ||
!dump_write(cprm->file, kaddr,
PAGE_SIZE);
-@@ -2055,6 +2544,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2055,6 +2506,7 @@ static int elf_core_dump(struct coredump_params *cprm)
if (e_phnum == PN_XNUM) {
size += sizeof(*shdr4extnum);
@@ -43518,7 +43386,7 @@ index 6ff96c6..3020df9 100644
if (size > cprm->limit
|| !dump_write(cprm->file, shdr4extnum,
sizeof(*shdr4extnum)))
-@@ -2075,6 +2565,97 @@ out:
+@@ -2075,6 +2527,97 @@ out:
#endif /* CONFIG_ELF_CORE */
@@ -43791,51 +43659,6 @@ index cfb5543..1ae7347 100644
if (!del) {
spin_lock(&rc->reloc_root_tree.lock);
-diff --git a/fs/buffer.c b/fs/buffer.c
-index c807931..4115eca 100644
---- a/fs/buffer.c
-+++ b/fs/buffer.c
-@@ -1087,6 +1087,9 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
- static struct buffer_head *
- __getblk_slow(struct block_device *bdev, sector_t block, int size)
- {
-+ int ret;
-+ struct buffer_head *bh;
-+
- /* Size must be multiple of hard sectorsize */
- if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
- (size < 512 || size > PAGE_SIZE))) {
-@@ -1099,20 +1102,21 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
- return NULL;
- }
-
-- for (;;) {
-- struct buffer_head * bh;
-- int ret;
-+retry:
-+ bh = __find_get_block(bdev, block, size);
-+ if (bh)
-+ return bh;
-
-+ ret = grow_buffers(bdev, block, size);
-+ if (ret == 0) {
-+ free_more_memory();
-+ goto retry;
-+ } else if (ret > 0) {
- bh = __find_get_block(bdev, block, size);
- if (bh)
- return bh;
--
-- ret = grow_buffers(bdev, block, size);
-- if (ret < 0)
-- return NULL;
-- if (ret == 0)
-- free_more_memory();
- }
-+ return NULL;
- }
-
- /*
diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
index 622f469..e8d2d55 100644
--- a/fs/cachefiles/bind.c
@@ -44623,10 +44446,10 @@ index af11098..81e3bbe 100644
/* Free the char* */
kfree(buf);
diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
-index 0dc5a3d..d3cdeea 100644
+index de42310..867dddd 100644
--- a/fs/ecryptfs/miscdev.c
+++ b/fs/ecryptfs/miscdev.c
-@@ -328,7 +328,7 @@ check_list:
+@@ -338,7 +338,7 @@ check_list:
goto out_unlock_msg_ctx;
i = 5;
if (msg_ctx->msg) {
@@ -44657,24 +44480,8 @@ index 608c1c3..7d040a8 100644
set_fs(fs_save);
return rc;
}
-diff --git a/fs/eventpoll.c b/fs/eventpoll.c
-index 4d9d3a4..a6f3763 100644
---- a/fs/eventpoll.c
-+++ b/fs/eventpoll.c
-@@ -1629,8 +1629,10 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
- if (op == EPOLL_CTL_ADD) {
- if (is_file_epoll(tfile)) {
- error = -ELOOP;
-- if (ep_loop_check(ep, tfile) != 0)
-+ if (ep_loop_check(ep, tfile) != 0) {
-+ clear_tfile_check_list();
- goto error_tgt_fput;
-+ }
- } else
- list_add(&tfile->f_tfile_llink, &tfile_check_list);
- }
diff --git a/fs/exec.c b/fs/exec.c
-index 160cd2f..52c1678 100644
+index 160cd2f..5cc2091 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -55,12 +55,33 @@
@@ -45194,7 +45001,7 @@ index 160cd2f..52c1678 100644
cn->corename = kmalloc(cn->size, GFP_KERNEL);
cn->used = 0;
-@@ -1815,6 +1948,228 @@ out:
+@@ -1815,6 +1948,250 @@ out:
return ispipe;
}
@@ -45339,7 +45146,7 @@ index 160cd2f..52c1678 100644
+
+#ifdef CONFIG_PAX_USERCOPY
+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
-+int object_is_on_stack(const void *obj, unsigned long len)
++static noinline int check_stack_object(const void *obj, unsigned long len)
+{
+ const void * const stack = task_stack_page(current);
+ const void * const stackend = stack + THREAD_SIZE;
@@ -45385,7 +45192,7 @@ index 160cd2f..52c1678 100644
+#endif
+}
+
-+__noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
++static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
+{
+ if (current->signal->curr_ip)
+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
@@ -45399,6 +45206,28 @@ index 160cd2f..52c1678 100644
+}
+#endif
+
++void check_object_size(const void *ptr, unsigned long n, bool to)
++{
++
++#ifdef CONFIG_PAX_USERCOPY
++ const char *type;
++
++ if (!n)
++ return;
++
++ type = check_heap_object(ptr, n, to);
++ if (!type) {
++ if (check_stack_object(ptr, n) != -1)
++ return;
++ type = "<process stack>";
++ }
++
++ pax_report_usercopy(ptr, n, to, type);
++#endif
++
++}
++EXPORT_SYMBOL(check_object_size);
++
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
+void pax_track_stack(void)
+{
@@ -45423,7 +45252,7 @@ index 160cd2f..52c1678 100644
static int zap_process(struct task_struct *start, int exit_code)
{
struct task_struct *t;
-@@ -2026,17 +2381,17 @@ static void wait_for_dump_helpers(struct file *file)
+@@ -2026,17 +2403,17 @@ static void wait_for_dump_helpers(struct file *file)
pipe = file->f_path.dentry->d_inode->i_pipe;
pipe_lock(pipe);
@@ -45446,7 +45275,7 @@ index 160cd2f..52c1678 100644
pipe_unlock(pipe);
}
-@@ -2097,7 +2452,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+@@ -2097,7 +2474,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
int retval = 0;
int flag = 0;
int ispipe;
@@ -45455,7 +45284,7 @@ index 160cd2f..52c1678 100644
struct coredump_params cprm = {
.signr = signr,
.regs = regs,
-@@ -2112,6 +2467,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+@@ -2112,6 +2489,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
audit_core_dumps(signr);
@@ -45465,7 +45294,7 @@ index 160cd2f..52c1678 100644
binfmt = mm->binfmt;
if (!binfmt || !binfmt->core_dump)
goto fail;
-@@ -2179,7 +2537,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+@@ -2179,7 +2559,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
}
cprm.limit = RLIM_INFINITY;
@@ -45474,7 +45303,7 @@ index 160cd2f..52c1678 100644
if (core_pipe_limit && (core_pipe_limit < dump_count)) {
printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
task_tgid_vnr(current), current->comm);
-@@ -2206,6 +2564,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+@@ -2206,6 +2586,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
} else {
struct inode *inode;
@@ -45483,7 +45312,7 @@ index 160cd2f..52c1678 100644
if (cprm.limit < binfmt->min_coredump)
goto fail_unlock;
-@@ -2249,7 +2609,7 @@ close_fail:
+@@ -2249,7 +2631,7 @@ close_fail:
filp_close(cprm.file, NULL);
fail_dropcount:
if (ispipe)
@@ -45492,7 +45321,7 @@ index 160cd2f..52c1678 100644
fail_unlock:
kfree(cn.corename);
fail_corename:
-@@ -2268,7 +2628,7 @@ fail:
+@@ -2268,7 +2650,7 @@ fail:
*/
int dump_write(struct file *file, const void *addr, int nr)
{
@@ -45740,27 +45569,10 @@ index 22764c7..86372c9 100644
break;
err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
diff --git a/fs/fifo.c b/fs/fifo.c
-index b1a524d..3d7942c 100644
+index cf6f434..3d7942c 100644
--- a/fs/fifo.c
+++ b/fs/fifo.c
-@@ -14,7 +14,7 @@
- #include <linux/sched.h>
- #include <linux/pipe_fs_i.h>
-
--static void wait_for_partner(struct inode* inode, unsigned int *cnt)
-+static int wait_for_partner(struct inode* inode, unsigned int *cnt)
- {
- int cur = *cnt;
-
-@@ -23,6 +23,7 @@ static void wait_for_partner(struct inode* inode, unsigned int *cnt)
- if (signal_pending(current))
- break;
- }
-+ return cur == *cnt ? -ERESTARTSYS : 0;
- }
-
- static void wake_up_partner(struct inode* inode)
-@@ -58,17 +59,16 @@ static int fifo_open(struct inode *inode, struct file *filp)
+@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
*/
filp->f_op = &read_pipefifo_fops;
pipe->r_counter++;
@@ -45773,15 +45585,7 @@ index b1a524d..3d7942c 100644
if ((filp->f_flags & O_NONBLOCK)) {
/* suppress POLLHUP until we have
* seen a writer */
- filp->f_version = pipe->w_counter;
- } else {
-- wait_for_partner(inode, &pipe->w_counter);
-- if(signal_pending(current))
-+ if (wait_for_partner(inode, &pipe->w_counter))
- goto err_rd;
- }
- }
-@@ -81,17 +81,16 @@ static int fifo_open(struct inode *inode, struct file *filp)
+@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
* errno=ENXIO when there is no process reading the FIFO.
*/
ret = -ENXIO;
@@ -45796,14 +45600,11 @@ index b1a524d..3d7942c 100644
wake_up_partner(inode);
- if (!pipe->readers) {
-- wait_for_partner(inode, &pipe->r_counter);
-- if (signal_pending(current))
+ if (!atomic_read(&pipe->readers)) {
-+ if (wait_for_partner(inode, &pipe->r_counter))
+ if (wait_for_partner(inode, &pipe->r_counter))
goto err_wr;
}
- break;
-@@ -105,11 +104,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
+@@ -104,11 +104,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
*/
filp->f_op = &rdwr_pipefifo_fops;
@@ -45818,7 +45619,7 @@ index b1a524d..3d7942c 100644
wake_up_partner(inode);
break;
-@@ -123,19 +122,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
+@@ -122,19 +122,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
return 0;
err_rd:
@@ -47344,10 +47145,10 @@ index cfd4959..a780959 100644
kfree(s);
}
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
-index 2d0ca24..c4b8676511 100644
+index ebc2f4d..eb1c5cd 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
-@@ -908,7 +908,7 @@ static struct file_system_type hugetlbfs_fs_type = {
+@@ -896,7 +896,7 @@ static struct file_system_type hugetlbfs_fs_type = {
.kill_sb = kill_litter_super,
};
@@ -47418,7 +47219,7 @@ index b09e51d..e482afa 100644
/*
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
-index a44eff0..462e07d 100644
+index a44eff076..462e07d 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -802,7 +802,7 @@ static int __init init_jfs_fs(void)
@@ -47477,7 +47278,7 @@ index 8392cb8..80d6193 100644
memcpy(c->data, &cookie, 4);
c->len=4;
diff --git a/fs/locks.c b/fs/locks.c
-index 0d68f1f..c3dacf2 100644
+index 6a64f15..c3dacf2 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -308,7 +308,7 @@ static int flock_make_lock(struct file *filp, struct file_lock **lock,
@@ -47507,15 +47308,6 @@ index 0d68f1f..c3dacf2 100644
{
struct file_lock *fl = locks_alloc_lock();
int error = -ENOMEM;
-@@ -1465,7 +1465,7 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
- case F_WRLCK:
- return generic_add_lease(filp, arg, flp);
- default:
-- BUG();
-+ return -EINVAL;
- }
- }
- EXPORT_SYMBOL(generic_setlease);
@@ -2075,16 +2075,16 @@ void locks_remove_flock(struct file *filp)
return;
@@ -48280,19 +48072,6 @@ index c587e2d..3641eaa 100644
-const struct inode_operations ntfs_empty_inode_ops = {};
+const struct inode_operations ntfs_empty_inode_ops __read_only;
-diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
-index 07ee5b4..1c7d45e 100644
---- a/fs/ocfs2/file.c
-+++ b/fs/ocfs2/file.c
-@@ -1950,7 +1950,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
- if (ret < 0)
- mlog_errno(ret);
-
-- if (file->f_flags & O_SYNC)
-+ if (file && (file->f_flags & O_SYNC))
- handle->h_sync = 1;
-
- ocfs2_commit_trans(osb, handle);
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index 210c352..a174f83 100644
--- a/fs/ocfs2/localalloc.c
@@ -60572,7 +60351,7 @@ index 6cd5b64..f620d2d 100644
#define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
-index b7babf0..71e4e74 100644
+index b7babf0..3ba8aee 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -22,6 +22,12 @@
@@ -60825,7 +60604,7 @@ index b7babf0..71e4e74 100644
static inline long atomic_long_dec_return(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
-@@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
+@@ -255,4 +393,55 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
#endif /* BITS_PER_LONG == 64 */
@@ -60843,6 +60622,10 @@ index b7babf0..71e4e74 100644
+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
++#ifdef CONFIG_X86
++ atomic_clear_mask_unchecked(0, NULL);
++ atomic_set_mask_unchecked(0, NULL);
++#endif
+
+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
@@ -60864,6 +60647,8 @@ index b7babf0..71e4e74 100644
+#define atomic_dec_unchecked(v) atomic_dec(v)
+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
++#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
++#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
+
+#define atomic_long_read_unchecked(v) atomic_long_read(v)
+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
@@ -60875,6 +60660,19 @@ index b7babf0..71e4e74 100644
+#endif
+
#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
+diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
+index e37963c..6f5b60b 100644
+--- a/include/asm-generic/atomic.h
++++ b/include/asm-generic/atomic.h
+@@ -158,7 +158,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ * Atomically clears the bits set in @mask from @v
+ */
+ #ifndef atomic_clear_mask
+-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
++static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+ {
+ unsigned long flags;
+
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index b18ce4f..2ee2843 100644
--- a/include/asm-generic/atomic64.h
@@ -61358,10 +61156,10 @@ index 04ffb2e..6799180 100644
extern struct cleancache_ops
cleancache_register_ops(struct cleancache_ops *ops);
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
-index dfadc96..d90deca 100644
+index dfadc96..441a641 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
-@@ -31,6 +31,15 @@
+@@ -31,6 +31,20 @@
#if __GNUC_MINOR__ >= 5
@@ -61374,10 +61172,15 @@ index dfadc96..d90deca 100644
+#ifdef SIZE_OVERFLOW_PLUGIN
+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
+#endif
++
++#ifdef LATENT_ENTROPY_PLUGIN
++#define __latent_entropy __attribute__((latent_entropy))
++#endif
++
/*
* Mark a position in code as unreachable. This can be used to
* suppress control flow warnings after asm blocks that transfer
-@@ -46,6 +55,11 @@
+@@ -46,6 +60,11 @@
#define __noclone __attribute__((__noclone__))
#endif
@@ -61390,7 +61193,7 @@ index dfadc96..d90deca 100644
#if __GNUC_MINOR__ > 0
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
-index 320d6c9..1221a6b 100644
+index 320d6c9..066b6d5 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -5,31 +5,62 @@
@@ -61466,7 +61269,7 @@ index 320d6c9..1221a6b 100644
#endif
#ifdef __KERNEL__
-@@ -264,6 +297,17 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+@@ -264,6 +297,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
# define __attribute_const__ /* unimplemented */
#endif
@@ -61481,10 +61284,15 @@ index 320d6c9..1221a6b 100644
+#ifndef __size_overflow
+# define __size_overflow(...)
+#endif
++
++#ifndef __latent_entropy
++# define __latent_entropy
++#endif
++
/*
* Tell gcc if a function is cold. The compiler will assume any path
* directly leading to the call is unlikely.
-@@ -273,6 +317,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+@@ -273,6 +322,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
#define __cold
#endif
@@ -61507,7 +61315,7 @@ index 320d6c9..1221a6b 100644
/* Simple shorthand for a section definition */
#ifndef __section
# define __section(S) __attribute__ ((__section__(#S)))
-@@ -306,6 +366,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+@@ -306,6 +371,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
* use is to mediate communication between process-level code and irq/NMI
* handlers, all running on the same CPU.
*/
@@ -61844,6 +61652,49 @@ index 4eec461..84c73cf 100644
struct disk_events *ev;
#ifdef CONFIG_BLK_DEV_INTEGRITY
struct blk_integrity *integrity;
+diff --git a/include/linux/gfp.h b/include/linux/gfp.h
+index 3a76faf..c0592c7 100644
+--- a/include/linux/gfp.h
++++ b/include/linux/gfp.h
+@@ -37,6 +37,12 @@ struct vm_area_struct;
+ #define ___GFP_NO_KSWAPD 0x400000u
+ #define ___GFP_OTHER_NODE 0x800000u
+
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++#define ___GFP_USERCOPY 0x1000000u
++#else
++#define ___GFP_USERCOPY 0
++#endif
++
+ /*
+ * GFP bitmasks..
+ *
+@@ -85,6 +91,7 @@ struct vm_area_struct;
+
+ #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
+ #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
++#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
+
+ /*
+ * This may seem redundant, but it's a way of annotating false positives vs.
+@@ -92,7 +99,7 @@ struct vm_area_struct;
+ */
+ #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
+
+-#define __GFP_BITS_SHIFT 24 /* Room for N __GFP_FOO bits */
++#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
+ #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
+
+ /* This equals 0, but use constants in case they ever change */
+@@ -146,6 +153,8 @@ struct vm_area_struct;
+ /* 4GB DMA on some platforms */
+ #define GFP_DMA32 __GFP_DMA32
+
++#define GFP_USERCOPY __GFP_USERCOPY
++
+ /* Convert GFP flags to their corresponding migrate type */
+ static inline int allocflags_to_migratetype(gfp_t gfp_flags)
+ {
diff --git a/include/linux/gracl.h b/include/linux/gracl.h
new file mode 100644
index 0000000..c938b1f
@@ -62998,10 +62849,54 @@ index a6deef4..c56a7f2 100644
and pointers */
#endif
diff --git a/include/linux/init.h b/include/linux/init.h
-index 9146f39..885354d 100644
+index 9146f39..e19693b 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
-@@ -293,13 +293,13 @@ void __init parse_early_options(char *cmdline);
+@@ -38,9 +38,15 @@
+ * Also note, that this data cannot be "const".
+ */
+
++#ifdef MODULE
++#define add_latent_entropy
++#else
++#define add_latent_entropy __latent_entropy
++#endif
++
+ /* These are for everybody (although not all archs will actually
+ discard it in modules) */
+-#define __init __section(.init.text) __cold notrace
++#define __init __section(.init.text) __cold notrace add_latent_entropy
+ #define __initdata __section(.init.data)
+ #define __initconst __section(.init.rodata)
+ #define __exitdata __section(.exit.data)
+@@ -82,7 +88,7 @@
+ #define __exit __section(.exit.text) __exitused __cold notrace
+
+ /* Used for HOTPLUG */
+-#define __devinit __section(.devinit.text) __cold notrace
++#define __devinit __section(.devinit.text) __cold notrace add_latent_entropy
+ #define __devinitdata __section(.devinit.data)
+ #define __devinitconst __section(.devinit.rodata)
+ #define __devexit __section(.devexit.text) __exitused __cold notrace
+@@ -90,7 +96,7 @@
+ #define __devexitconst __section(.devexit.rodata)
+
+ /* Used for HOTPLUG_CPU */
+-#define __cpuinit __section(.cpuinit.text) __cold notrace
++#define __cpuinit __section(.cpuinit.text) __cold notrace add_latent_entropy
+ #define __cpuinitdata __section(.cpuinit.data)
+ #define __cpuinitconst __section(.cpuinit.rodata)
+ #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
+@@ -98,7 +104,7 @@
+ #define __cpuexitconst __section(.cpuexit.rodata)
+
+ /* Used for MEMORY_HOTPLUG */
+-#define __meminit __section(.meminit.text) __cold notrace
++#define __meminit __section(.meminit.text) __cold notrace add_latent_entropy
+ #define __meminitdata __section(.meminit.data)
+ #define __meminitconst __section(.meminit.rodata)
+ #define __memexit __section(.memexit.text) __exitused __cold notrace
+@@ -293,13 +299,13 @@ void __init parse_early_options(char *cmdline);
/* Each module must use one module_init(). */
#define module_init(initfn) \
@@ -63517,7 +63412,7 @@ index 1d1b1e1..2a13c78 100644
#define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
-index 188cb2f..d401c76 100644
+index 905b1e1..d401c76 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -369,7 +369,7 @@ struct zone {
@@ -63529,15 +63424,6 @@ index 188cb2f..d401c76 100644
/*
* The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
-@@ -652,7 +652,7 @@ typedef struct pglist_data {
- range, including holes */
- int node_id;
- wait_queue_head_t kswapd_wait;
-- struct task_struct *kswapd;
-+ struct task_struct *kswapd; /* Protected by lock_memory_hotplug() */
- int kswapd_max_order;
- enum zone_type classzone_idx;
- } pg_data_t;
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 468819c..17b9db3 100644
--- a/include/linux/mod_devicetable.h
@@ -64007,10 +63893,21 @@ index 800f113..12c82ec 100644
}
diff --git a/include/linux/random.h b/include/linux/random.h
-index 8f74538..02a1012 100644
+index 8f74538..de61694 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
-@@ -69,12 +69,17 @@ void srandom32(u32 seed);
+@@ -54,6 +54,10 @@ extern void add_input_randomness(unsigned int type, unsigned int code,
+ unsigned int value);
+ extern void add_interrupt_randomness(int irq);
+
++#ifdef CONFIG_PAX_LATENT_ENTROPY
++extern void transfer_latent_entropy(void);
++#endif
++
+ extern void get_random_bytes(void *buf, int nbytes);
+ void generate_random_uuid(unsigned char uuid_out[16]);
+
+@@ -69,12 +73,17 @@ void srandom32(u32 seed);
u32 prandom32(struct rnd_state *);
@@ -64148,7 +64045,7 @@ index 2148b12..519b820 100644
static inline void anon_vma_merge(struct vm_area_struct *vma,
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 1c4f3e9..342eb1f 100644
+index 5afa2a3..98df553 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -101,6 +101,7 @@ struct bio_list;
@@ -64332,12 +64229,12 @@ index 1c4f3e9..342eb1f 100644
+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
+extern void pax_report_refcount_overflow(struct pt_regs *regs);
-+extern __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
++extern void check_object_size(const void *ptr, unsigned long n, bool to);
+
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
-@@ -2081,7 +2173,9 @@ void yield(void);
+@@ -2089,7 +2181,9 @@ void yield(void);
extern struct exec_domain default_exec_domain;
union thread_union {
@@ -64347,7 +64244,7 @@ index 1c4f3e9..342eb1f 100644
unsigned long stack[THREAD_SIZE/sizeof(long)];
};
-@@ -2114,6 +2208,7 @@ extern struct pid_namespace init_pid_ns;
+@@ -2122,6 +2216,7 @@ extern struct pid_namespace init_pid_ns;
*/
extern struct task_struct *find_task_by_vpid(pid_t nr);
@@ -64355,7 +64252,7 @@ index 1c4f3e9..342eb1f 100644
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
struct pid_namespace *ns);
-@@ -2235,6 +2330,12 @@ static inline void mmdrop(struct mm_struct * mm)
+@@ -2243,6 +2338,12 @@ static inline void mmdrop(struct mm_struct * mm)
extern void mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
@@ -64368,7 +64265,7 @@ index 1c4f3e9..342eb1f 100644
/* Remove the current tasks stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);
/* Allocate a new mm structure and copy contents from tsk->mm */
-@@ -2251,7 +2352,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
+@@ -2259,7 +2360,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);
@@ -64377,7 +64274,7 @@ index 1c4f3e9..342eb1f 100644
extern void daemonize(const char *, ...);
extern int allow_signal(int);
-@@ -2416,13 +2517,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
+@@ -2424,9 +2525,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
#endif
@@ -64389,14 +64286,6 @@ index 1c4f3e9..342eb1f 100644
return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}
-
-+#ifdef CONFIG_PAX_USERCOPY
-+extern int object_is_on_stack(const void *obj, unsigned long len);
-+#endif
-+
- extern void thread_info_cache_init(void);
-
- #ifdef CONFIG_DEBUG_STACK_USAGE
diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
index 899fbb4..1cb4138 100644
--- a/include/linux/screen_info.h
@@ -64461,10 +64350,10 @@ index 92808b8..c28cac4 100644
/* shm_mode upper byte flags */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
-index bdb4590..961638c 100644
+index 53dc7e7..bb5915f 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
-@@ -643,7 +643,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
+@@ -640,7 +640,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
*/
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
@@ -64473,7 +64362,7 @@ index bdb4590..961638c 100644
}
/**
-@@ -656,7 +656,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
+@@ -653,7 +653,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
const struct sk_buff *skb)
{
@@ -64482,7 +64371,7 @@ index bdb4590..961638c 100644
}
/**
-@@ -669,7 +669,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
+@@ -666,7 +666,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
const struct sk_buff *skb)
{
@@ -64491,7 +64380,7 @@ index bdb4590..961638c 100644
}
/**
-@@ -1546,7 +1546,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
+@@ -1543,7 +1543,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
* NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
*/
#ifndef NET_SKB_PAD
@@ -64501,7 +64390,7 @@ index bdb4590..961638c 100644
extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
diff --git a/include/linux/slab.h b/include/linux/slab.h
-index 573c809..07e1f43 100644
+index 573c809..a6e62c9 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -11,12 +11,20 @@
@@ -64516,7 +64405,7 @@ index 573c809..07e1f43 100644
*/
#define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
+
-+#ifdef CONFIG_PAX_USERCOPY
++#ifdef CONFIG_PAX_USERCOPY_SLABS
+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
+#else
+#define SLAB_USERCOPY 0x00000000UL
@@ -64542,7 +64431,7 @@ index 573c809..07e1f43 100644
/*
* struct kmem_cache related prototypes
-@@ -156,11 +167,12 @@ unsigned int kmem_cache_size(struct kmem_cache *);
+@@ -156,11 +167,13 @@ unsigned int kmem_cache_size(struct kmem_cache *);
/*
* Common kmalloc functions provided by all allocators
*/
@@ -64553,11 +64442,12 @@ index 573c809..07e1f43 100644
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);
-+void check_object_size(const void *ptr, unsigned long n, bool to);
++const char *check_heap_object(const void *ptr, unsigned long n, bool to);
++bool is_usercopy_object(const void *ptr);
/*
* Allocator specific definitions. These are mainly used to establish optimized
-@@ -287,7 +299,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
+@@ -287,7 +300,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
*/
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
(defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
@@ -64566,7 +64456,7 @@ index 573c809..07e1f43 100644
#define kmalloc_track_caller(size, flags) \
__kmalloc_track_caller(size, flags, _RET_IP_)
#else
-@@ -306,7 +318,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
+@@ -306,7 +319,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
*/
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
(defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
@@ -64576,7 +64466,7 @@ index 573c809..07e1f43 100644
__kmalloc_node_track_caller(size, flags, node, \
_RET_IP_)
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
-index d00e0ba..d61fb1f 100644
+index d00e0ba..f75c968 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -68,10 +68,10 @@ struct kmem_cache {
@@ -64594,7 +64484,16 @@ index d00e0ba..d61fb1f 100644
/*
* If debugging is enabled, then the allocator can add additional
-@@ -109,7 +109,7 @@ struct cache_sizes {
+@@ -105,11 +105,16 @@ struct cache_sizes {
+ #ifdef CONFIG_ZONE_DMA
+ struct kmem_cache *cs_dmacachep;
+ #endif
++
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++ struct kmem_cache *cs_usercopycachep;
++#endif
++
+ };
extern struct cache_sizes malloc_sizes[];
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
@@ -64603,7 +64502,7 @@ index d00e0ba..d61fb1f 100644
#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(size_t size,
-@@ -127,6 +127,7 @@ static inline size_t slab_buffer_size(struct kmem_cache *cachep)
+@@ -127,6 +132,7 @@ static inline size_t slab_buffer_size(struct kmem_cache *cachep)
}
#endif
@@ -64611,7 +64510,21 @@ index d00e0ba..d61fb1f 100644
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
struct kmem_cache *cachep;
-@@ -162,7 +163,7 @@ found:
+@@ -152,6 +158,13 @@ found:
+ cachep = malloc_sizes[i].cs_dmacachep;
+ else
+ #endif
++
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++ if (flags & GFP_USERCOPY)
++ cachep = malloc_sizes[i].cs_usercopycachep;
++ else
++#endif
++
+ cachep = malloc_sizes[i].cs_cachep;
+
+ ret = kmem_cache_alloc_trace(size, cachep, flags);
+@@ -162,7 +175,7 @@ found:
}
#ifdef CONFIG_NUMA
@@ -64620,7 +64533,7 @@ index d00e0ba..d61fb1f 100644
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
#ifdef CONFIG_TRACING
-@@ -181,6 +182,7 @@ kmem_cache_alloc_node_trace(size_t size,
+@@ -181,6 +194,7 @@ kmem_cache_alloc_node_trace(size_t size,
}
#endif
@@ -64628,6 +64541,20 @@ index d00e0ba..d61fb1f 100644
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
struct kmem_cache *cachep;
+@@ -205,6 +219,13 @@ found:
+ cachep = malloc_sizes[i].cs_dmacachep;
+ else
+ #endif
++
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++ if (flags & GFP_USERCOPY)
++ cachep = malloc_sizes[i].cs_usercopycachep;
++ else
++#endif
++
+ cachep = malloc_sizes[i].cs_cachep;
+
+ return kmem_cache_alloc_node_trace(size, cachep, flags, node);
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index 0ec00b3..65e7e0e 100644
--- a/include/linux/slob_def.h
@@ -66182,7 +66109,7 @@ index 2531811..040d4d4 100644
next_state = Reset;
return 0;
diff --git a/init/main.c b/init/main.c
-index cb08fea2..f5b850d 100644
+index cb08fea2..e9a9598 100644
--- a/init/main.c
+++ b/init/main.c
@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
@@ -66272,7 +66199,39 @@ index cb08fea2..f5b850d 100644
}
return ret;
-@@ -821,7 +867,7 @@ static int __init kernel_init(void * unused)
+@@ -711,8 +757,14 @@ static void __init do_initcalls(void)
+ {
+ initcall_t *fn;
+
+- for (fn = __early_initcall_end; fn < __initcall_end; fn++)
++ for (fn = __early_initcall_end; fn < __initcall_end; fn++) {
+ do_one_initcall(*fn);
++
++#ifdef CONFIG_PAX_LATENT_ENTROPY
++ transfer_latent_entropy();
++#endif
++
++ }
+ }
+
+ /*
+@@ -738,8 +790,14 @@ static void __init do_pre_smp_initcalls(void)
+ {
+ initcall_t *fn;
+
+- for (fn = __initcall_start; fn < __early_initcall_end; fn++)
++ for (fn = __initcall_start; fn < __early_initcall_end; fn++) {
+ do_one_initcall(*fn);
++
++#ifdef CONFIG_PAX_LATENT_ENTROPY
++ transfer_latent_entropy();
++#endif
++
++ }
+ }
+
+ static void run_init_process(const char *init_filename)
+@@ -821,7 +879,7 @@ static int __init kernel_init(void * unused)
do_basic_setup();
/* Open the /dev/console on the rootfs, this should never fail */
@@ -66281,7 +66240,7 @@ index cb08fea2..f5b850d 100644
printk(KERN_WARNING "Warning: unable to open an initial console.\n");
(void) sys_dup(0);
-@@ -834,11 +880,13 @@ static int __init kernel_init(void * unused)
+@@ -834,11 +892,13 @@ static int __init kernel_init(void * unused)
if (!ramdisk_execute_command)
ramdisk_execute_command = "/init";
@@ -67635,18 +67594,18 @@ index 9b22d03..6295b62 100644
prev->next = info->next;
else
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
-index ae34bf5..4e2f3d0 100644
+index 6db7a5e..25b6648 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
-@@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
+@@ -1407,7 +1407,7 @@ void hrtimer_peek_ahead_timers(void)
local_irq_restore(flags);
}
-static void run_hrtimer_softirq(struct softirq_action *h)
+static void run_hrtimer_softirq(void)
{
- hrtimer_peek_ahead_timers();
- }
+ struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 66ff710..794bc5a 100644
--- a/kernel/jump_label.c
@@ -69889,10 +69848,10 @@ index 3d9f31c..7fefc9e 100644
default:
diff --git a/kernel/sched.c b/kernel/sched.c
-index 576a27f..b8f518c 100644
+index 52ac69b..b102f7f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
-@@ -5097,6 +5097,8 @@ int can_nice(const struct task_struct *p, const int nice)
+@@ -5227,6 +5227,8 @@ int can_nice(const struct task_struct *p, const int nice)
/* convert nice value [19,-20] to rlimit style value [1,40] */
int nice_rlim = 20 - nice;
@@ -69901,7 +69860,7 @@ index 576a27f..b8f518c 100644
return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
capable(CAP_SYS_NICE));
}
-@@ -5130,7 +5132,8 @@ SYSCALL_DEFINE1(nice, int, increment)
+@@ -5260,7 +5262,8 @@ SYSCALL_DEFINE1(nice, int, increment)
if (nice > 19)
nice = 19;
@@ -69911,7 +69870,7 @@ index 576a27f..b8f518c 100644
return -EPERM;
retval = security_task_setnice(current, nice);
-@@ -5287,6 +5290,7 @@ recheck:
+@@ -5417,6 +5420,7 @@ recheck:
unsigned long rlim_rtprio =
task_rlimit(p, RLIMIT_RTPRIO);
@@ -70706,7 +70665,7 @@ index fd4a7b1..fae5c2a 100644
cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
tick_broadcast_clear_oneshot(cpu);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
-index 2378413..be455fd 100644
+index 03e67d4..21ae77b 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -14,6 +14,7 @@
@@ -70717,7 +70676,7 @@ index 2378413..be455fd 100644
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
-@@ -365,6 +366,8 @@ int do_settimeofday(const struct timespec *tv)
+@@ -385,6 +386,8 @@ int do_settimeofday(const struct timespec *tv)
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
@@ -71576,10 +71535,10 @@ index 8f005e9..1cb1036 100644
/* if an huge pmd materialized from under us just retry later */
if (unlikely(pmd_trans_huge(*pmd)))
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
-index 5f5c545..c8312c8 100644
+index 7c535b0..1a2d14f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
-@@ -2356,6 +2356,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2435,6 +2435,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
return 1;
}
@@ -71607,7 +71566,7 @@ index 5f5c545..c8312c8 100644
/*
* Hugetlb_cow() should be called with page lock of the original hugepage held.
*/
-@@ -2458,6 +2479,11 @@ retry_avoidcopy:
+@@ -2537,6 +2558,11 @@ retry_avoidcopy:
make_huge_pte(vma, new_page, 1));
page_remove_rmap(old_page);
hugepage_add_new_anon_rmap(new_page, vma, address);
@@ -71619,7 +71578,7 @@ index 5f5c545..c8312c8 100644
/* Make the old page be freed below */
new_page = old_page;
mmu_notifier_invalidate_range_end(mm,
-@@ -2609,6 +2635,10 @@ retry:
+@@ -2688,6 +2714,10 @@ retry:
&& (vma->vm_flags & VM_SHARED)));
set_huge_pte_at(mm, address, ptep, new_pte);
@@ -71630,7 +71589,7 @@ index 5f5c545..c8312c8 100644
if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
/* Optimization, do the COW without a second fault */
ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
-@@ -2638,6 +2668,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2717,6 +2747,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
static DEFINE_MUTEX(hugetlb_instantiation_mutex);
struct hstate *h = hstate_vma(vma);
@@ -71641,7 +71600,7 @@ index 5f5c545..c8312c8 100644
ptep = huge_pte_offset(mm, address);
if (ptep) {
entry = huge_ptep_get(ptep);
-@@ -2649,6 +2683,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2728,6 +2762,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
VM_FAULT_SET_HINDEX(h - hstates);
}
@@ -72629,7 +72588,7 @@ index 177aca4..ab3a744 100644
err = -EPERM;
goto out;
diff --git a/mm/mlock.c b/mm/mlock.c
-index 4f4f53b..9511904 100644
+index 4f4f53b..de8e432 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -13,6 +13,7 @@
@@ -72640,6 +72599,15 @@ index 4f4f53b..9511904 100644
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>
+@@ -376,7 +377,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
+ {
+ unsigned long nstart, end, tmp;
+ struct vm_area_struct * vma, * prev;
+- int error;
++ int error = 0;
+
+ VM_BUG_ON(start & ~PAGE_MASK);
+ VM_BUG_ON(len != PAGE_ALIGN(len));
@@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
return -EINVAL;
if (end == start)
@@ -74723,7 +74691,7 @@ index 7a82174..75d1c8b 100644
return -ENOMEM;
diff --git a/mm/slab.c b/mm/slab.c
-index 83311c9a..fcf8f86 100644
+index 83311c9a..2449631 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -151,7 +151,7 @@
@@ -74777,16 +74745,36 @@ index 83311c9a..fcf8f86 100644
{
u32 offset = (obj - slab->s_mem);
return reciprocal_divide(offset, cache->reciprocal_buffer_size);
-@@ -564,7 +564,7 @@ struct cache_names {
+@@ -559,12 +559,13 @@ EXPORT_SYMBOL(malloc_sizes);
+ struct cache_names {
+ char *name;
+ char *name_dma;
++ char *name_usercopy;
+ };
+
static struct cache_names __initdata cache_names[] = {
- #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
+-#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
++#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)", .name_usercopy = "size-" #x "(USERCOPY)" },
#include <linux/kmalloc_sizes.h>
- {NULL,}
+ {NULL}
#undef CACHE
};
-@@ -1572,7 +1572,7 @@ void __init kmem_cache_init(void)
+@@ -752,6 +753,12 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
+ if (unlikely(gfpflags & GFP_DMA))
+ return csizep->cs_dmacachep;
+ #endif
++
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++ if (unlikely(gfpflags & GFP_USERCOPY))
++ return csizep->cs_usercopycachep;
++#endif
++
+ return csizep->cs_cachep;
+ }
+
+@@ -1572,7 +1579,7 @@ void __init kmem_cache_init(void)
sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
sizes[INDEX_AC].cs_size,
ARCH_KMALLOC_MINALIGN,
@@ -74795,7 +74783,7 @@ index 83311c9a..fcf8f86 100644
NULL);
if (INDEX_AC != INDEX_L3) {
-@@ -1580,7 +1580,7 @@ void __init kmem_cache_init(void)
+@@ -1580,7 +1587,7 @@ void __init kmem_cache_init(void)
kmem_cache_create(names[INDEX_L3].name,
sizes[INDEX_L3].cs_size,
ARCH_KMALLOC_MINALIGN,
@@ -74804,7 +74792,7 @@ index 83311c9a..fcf8f86 100644
NULL);
}
-@@ -1598,7 +1598,7 @@ void __init kmem_cache_init(void)
+@@ -1598,7 +1605,7 @@ void __init kmem_cache_init(void)
sizes->cs_cachep = kmem_cache_create(names->name,
sizes->cs_size,
ARCH_KMALLOC_MINALIGN,
@@ -74813,7 +74801,24 @@ index 83311c9a..fcf8f86 100644
NULL);
}
#ifdef CONFIG_ZONE_DMA
-@@ -4322,10 +4322,10 @@ static int s_show(struct seq_file *m, void *p)
+@@ -1610,6 +1617,16 @@ void __init kmem_cache_init(void)
+ SLAB_PANIC,
+ NULL);
+ #endif
++
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++ sizes->cs_usercopycachep = kmem_cache_create(
++ names->name_usercopy,
++ sizes->cs_size,
++ ARCH_KMALLOC_MINALIGN,
++ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
++ NULL);
++#endif
++
+ sizes++;
+ names++;
+ }
+@@ -4322,10 +4339,10 @@ static int s_show(struct seq_file *m, void *p)
}
/* cpu stats */
{
@@ -74828,7 +74833,7 @@ index 83311c9a..fcf8f86 100644
seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
allochit, allocmiss, freehit, freemiss);
-@@ -4584,13 +4584,62 @@ static int __init slab_proc_init(void)
+@@ -4584,13 +4601,68 @@ static int __init slab_proc_init(void)
{
proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
#ifdef CONFIG_DEBUG_SLAB_LEAK
@@ -74840,60 +74845,66 @@ index 83311c9a..fcf8f86 100644
module_init(slab_proc_init);
#endif
-+void check_object_size(const void *ptr, unsigned long n, bool to)
++bool is_usercopy_object(const void *ptr)
+{
++ struct page *page;
++ struct kmem_cache *cachep;
++
++ if (ZERO_OR_NULL_PTR(ptr))
++ return false;
++
++ if (!virt_addr_valid(ptr))
++ return false;
++
++ page = virt_to_head_page(ptr);
++
++ if (!PageSlab(page))
++ return false;
++
++ cachep = page_get_cache(page);
++ return cachep->flags & SLAB_USERCOPY;
++}
+
+#ifdef CONFIG_PAX_USERCOPY
++const char *check_heap_object(const void *ptr, unsigned long n, bool to)
++{
+ struct page *page;
-+ struct kmem_cache *cachep = NULL;
++ struct kmem_cache *cachep;
+ struct slab *slabp;
+ unsigned int objnr;
+ unsigned long offset;
-+ const char *type;
-+
-+ if (!n)
-+ return;
+
-+ type = "<null>";
+ if (ZERO_OR_NULL_PTR(ptr))
-+ goto report;
++ return "<null>";
+
+ if (!virt_addr_valid(ptr))
-+ return;
++ return NULL;
+
+ page = virt_to_head_page(ptr);
+
-+ type = "<process stack>";
-+ if (!PageSlab(page)) {
-+ if (object_is_on_stack(ptr, n) == -1)
-+ goto report;
-+ return;
-+ }
++ if (!PageSlab(page))
++ return NULL;
+
+ cachep = page_get_cache(page);
-+ type = cachep->name;
+ if (!(cachep->flags & SLAB_USERCOPY))
-+ goto report;
++ return cachep->name;
+
+ slabp = page_get_slab(page);
+ objnr = obj_to_index(cachep, slabp, ptr);
+ BUG_ON(objnr >= cachep->num);
+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
-+ return;
-+
-+report:
-+ pax_report_usercopy(ptr, n, to, type);
-+#endif
++ return NULL;
+
++ return cachep->name;
+}
-+EXPORT_SYMBOL(check_object_size);
++#endif
+
/**
* ksize - get the actual amount of memory allocated for a given object
* @objp: Pointer to the object
diff --git a/mm/slob.c b/mm/slob.c
-index 8105be4..e045f96 100644
+index 8105be4..3c15e57 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -29,7 +29,7 @@
@@ -75044,7 +75055,7 @@ index 8105be4..e045f96 100644
return ret;
}
EXPORT_SYMBOL(__kmalloc_node);
-@@ -533,13 +547,92 @@ void kfree(const void *block)
+@@ -533,13 +547,83 @@ void kfree(const void *block)
sp = slob_page(block);
if (is_slob_page(sp)) {
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
@@ -75062,40 +75073,34 @@ index 8105be4..e045f96 100644
}
EXPORT_SYMBOL(kfree);
-+void check_object_size(const void *ptr, unsigned long n, bool to)
++bool is_usercopy_object(const void *ptr)
+{
++ return false;
++}
+
+#ifdef CONFIG_PAX_USERCOPY
++const char *check_heap_object(const void *ptr, unsigned long n, bool to)
++{
+ struct slob_page *sp;
+ const slob_t *free;
+ const void *base;
+ unsigned long flags;
-+ const char *type;
-+
-+ if (!n)
-+ return;
+
-+ type = "<null>";
+ if (ZERO_OR_NULL_PTR(ptr))
-+ goto report;
++ return "<null>";
+
+ if (!virt_addr_valid(ptr))
-+ return;
++ return NULL;
+
-+ type = "<process stack>";
+ sp = slob_page(ptr);
-+ if (!PageSlab((struct page *)sp)) {
-+ if (object_is_on_stack(ptr, n) == -1)
-+ goto report;
-+ return;
-+ }
++ if (!PageSlab((struct page *)sp))
++ return NULL;
+
-+ type = "<slob>";
+ if (sp->size) {
+ base = page_address(&sp->page);
+ if (base <= ptr && n <= sp->size - (ptr - base))
-+ return;
-+ goto report;
++ return NULL;
++ return "<slob>";
+ }
+
+ /* some tricky double walking to find the chunk */
@@ -75126,21 +75131,18 @@ index 8105be4..e045f96 100644
+ break;
+
+ spin_unlock_irqrestore(&slob_lock, flags);
-+ return;
++ return NULL;
+ }
+
+ spin_unlock_irqrestore(&slob_lock, flags);
-+report:
-+ pax_report_usercopy(ptr, n, to, type);
-+#endif
-+
++ return "<slob>";
+}
-+EXPORT_SYMBOL(check_object_size);
++#endif
+
/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
-@@ -552,10 +645,10 @@ size_t ksize(const void *block)
+@@ -552,10 +636,10 @@ size_t ksize(const void *block)
sp = slob_page(block);
if (is_slob_page(sp)) {
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
@@ -75154,11 +75156,11 @@ index 8105be4..e045f96 100644
}
EXPORT_SYMBOL(ksize);
-@@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+@@ -571,8 +655,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
{
struct kmem_cache *c;
-+#ifdef CONFIG_PAX_USERCOPY
++#ifdef CONFIG_PAX_USERCOPY_SLABS
+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
+#else
@@ -75168,11 +75170,11 @@ index 8105be4..e045f96 100644
if (c) {
c->name = name;
-@@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
+@@ -614,17 +703,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
lockdep_trace_alloc(flags);
-+#ifdef CONFIG_PAX_USERCOPY
++#ifdef CONFIG_PAX_USERCOPY_SLABS
+ b = __kmalloc_node_align(c->size, flags, node, c->align);
+#else
if (c->size < PAGE_SIZE) {
@@ -75194,7 +75196,7 @@ index 8105be4..e045f96 100644
if (c->ctor)
c->ctor(b);
-@@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
+@@ -636,10 +733,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
static void __kmem_cache_free(void *b, int size)
{
@@ -75213,13 +75215,13 @@ index 8105be4..e045f96 100644
}
static void kmem_rcu_free(struct rcu_head *head)
-@@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
+@@ -652,17 +755,31 @@ static void kmem_rcu_free(struct rcu_head *head)
void kmem_cache_free(struct kmem_cache *c, void *b)
{
+ int size = c->size;
+
-+#ifdef CONFIG_PAX_USERCOPY
++#ifdef CONFIG_PAX_USERCOPY_SLABS
+ if (size + c->align < PAGE_SIZE) {
+ size += c->align;
+ b -= c->align;
@@ -75239,7 +75241,7 @@ index 8105be4..e045f96 100644
+ __kmem_cache_free(b, size);
}
-+#ifdef CONFIG_PAX_USERCOPY
++#ifdef CONFIG_PAX_USERCOPY_SLABS
+ trace_kfree(_RET_IP_, b);
+#else
trace_kmem_cache_free(_RET_IP_, b);
@@ -75249,7 +75251,7 @@ index 8105be4..e045f96 100644
EXPORT_SYMBOL(kmem_cache_free);
diff --git a/mm/slub.c b/mm/slub.c
-index af47188..ff84aee 100644
+index af47188..9c2d9c0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -208,7 +208,7 @@ struct track {
@@ -75307,58 +75309,89 @@ index af47188..ff84aee 100644
list_del(&s->list);
up_write(&slub_lock);
if (kmem_cache_close(s)) {
-@@ -3361,6 +3362,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
+@@ -3179,6 +3180,10 @@ static struct kmem_cache *kmem_cache;
+ static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
+ #endif
+
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++static struct kmem_cache *kmalloc_usercopy_caches[SLUB_PAGE_SHIFT];
++#endif
++
+ static int __init setup_slub_min_order(char *str)
+ {
+ get_option(&str, &slub_min_order);
+@@ -3293,6 +3298,13 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
+ return kmalloc_dma_caches[index];
+
+ #endif
++
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++ if (flags & SLAB_USERCOPY)
++ return kmalloc_usercopy_caches[index];
++
++#endif
++
+ return kmalloc_caches[index];
+ }
+
+@@ -3361,6 +3373,56 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
EXPORT_SYMBOL(__kmalloc_node);
#endif
-+void check_object_size(const void *ptr, unsigned long n, bool to)
++bool is_usercopy_object(const void *ptr)
+{
++ struct page *page;
++ struct kmem_cache *s;
++
++ if (ZERO_OR_NULL_PTR(ptr))
++ return false;
++
++ if (!virt_addr_valid(ptr))
++ return false;
++
++ page = virt_to_head_page(ptr);
++
++ if (!PageSlab(page))
++ return false;
++
++ s = page->slab;
++ return s->flags & SLAB_USERCOPY;
++}
+
+#ifdef CONFIG_PAX_USERCOPY
++const char *check_heap_object(const void *ptr, unsigned long n, bool to)
++{
+ struct page *page;
-+ struct kmem_cache *s = NULL;
++ struct kmem_cache *s;
+ unsigned long offset;
-+ const char *type;
+
-+ if (!n)
-+ return;
-+
-+ type = "<null>";
+ if (ZERO_OR_NULL_PTR(ptr))
-+ goto report;
++ return "<null>";
+
+ if (!virt_addr_valid(ptr))
-+ return;
++ return NULL;
+
+ page = virt_to_head_page(ptr);
+
-+ type = "<process stack>";
-+ if (!PageSlab(page)) {
-+ if (object_is_on_stack(ptr, n) == -1)
-+ goto report;
-+ return;
-+ }
++ if (!PageSlab(page))
++ return NULL;
+
+ s = page->slab;
-+ type = s->name;
+ if (!(s->flags & SLAB_USERCOPY))
-+ goto report;
++ return s->name;
+
+ offset = (ptr - page_address(page)) % s->size;
+ if (offset <= s->objsize && n <= s->objsize - offset)
-+ return;
-+
-+report:
-+ pax_report_usercopy(ptr, n, to, type);
-+#endif
++ return NULL;
+
++ return s->name;
+}
-+EXPORT_SYMBOL(check_object_size);
++#endif
+
size_t ksize(const void *object)
{
struct page *page;
-@@ -3635,7 +3680,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
+@@ -3635,7 +3697,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
int node;
list_add(&s->list, &slab_caches);
@@ -75367,7 +75400,7 @@ index af47188..ff84aee 100644
for_each_node_state(node, N_NORMAL_MEMORY) {
struct kmem_cache_node *n = get_node(s, node);
-@@ -3752,17 +3797,17 @@ void __init kmem_cache_init(void)
+@@ -3752,17 +3814,17 @@ void __init kmem_cache_init(void)
/* Caches that are not of the two-to-the-power-of size */
if (KMALLOC_MIN_SIZE <= 32) {
@@ -75388,7 +75421,30 @@ index af47188..ff84aee 100644
caches++;
}
-@@ -3830,7 +3875,7 @@ static int slab_unmergeable(struct kmem_cache *s)
+@@ -3804,6 +3866,22 @@ void __init kmem_cache_init(void)
+ }
+ }
+ #endif
++
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++ for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
++ struct kmem_cache *s = kmalloc_caches[i];
++
++ if (s && s->size) {
++ char *name = kasprintf(GFP_NOWAIT,
++ "usercopy-kmalloc-%d", s->objsize);
++
++ BUG_ON(!name);
++ kmalloc_usercopy_caches[i] = create_kmalloc_cache(name,
++ s->objsize, SLAB_USERCOPY);
++ }
++ }
++#endif
++
+ printk(KERN_INFO
+ "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
+ " CPUs=%d, Nodes=%d\n",
+@@ -3830,7 +3908,7 @@ static int slab_unmergeable(struct kmem_cache *s)
/*
* We may have set a slab to be unmergeable during bootstrap.
*/
@@ -75397,7 +75453,7 @@ index af47188..ff84aee 100644
return 1;
return 0;
-@@ -3889,7 +3934,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+@@ -3889,7 +3967,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
down_write(&slub_lock);
s = find_mergeable(size, align, flags, name, ctor);
if (s) {
@@ -75406,7 +75462,7 @@ index af47188..ff84aee 100644
/*
* Adjust the object sizes so that we clear
* the complete object on kzalloc.
-@@ -3898,7 +3943,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+@@ -3898,7 +3976,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
if (sysfs_slab_alias(s, name)) {
@@ -75415,7 +75471,7 @@ index af47188..ff84aee 100644
goto err;
}
up_write(&slub_lock);
-@@ -4027,7 +4072,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
+@@ -4027,7 +4105,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
}
#endif
@@ -75424,7 +75480,7 @@ index af47188..ff84aee 100644
static int count_inuse(struct page *page)
{
return page->inuse;
-@@ -4414,12 +4459,12 @@ static void resiliency_test(void)
+@@ -4414,12 +4492,12 @@ static void resiliency_test(void)
validate_slab_cache(kmalloc_caches[9]);
}
#else
@@ -75439,7 +75495,7 @@ index af47188..ff84aee 100644
enum slab_stat_type {
SL_ALL, /* All slabs */
SL_PARTIAL, /* Only partially allocated slabs */
-@@ -4660,7 +4705,7 @@ SLAB_ATTR_RO(ctor);
+@@ -4660,7 +4738,7 @@ SLAB_ATTR_RO(ctor);
static ssize_t aliases_show(struct kmem_cache *s, char *buf)
{
@@ -75448,7 +75504,7 @@ index af47188..ff84aee 100644
}
SLAB_ATTR_RO(aliases);
-@@ -5227,6 +5272,7 @@ static char *create_unique_id(struct kmem_cache *s)
+@@ -5227,6 +5305,7 @@ static char *create_unique_id(struct kmem_cache *s)
return name;
}
@@ -75456,7 +75512,7 @@ index af47188..ff84aee 100644
static int sysfs_slab_add(struct kmem_cache *s)
{
int err;
-@@ -5289,6 +5335,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
+@@ -5289,6 +5368,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
kobject_del(&s->kobj);
kobject_put(&s->kobj);
}
@@ -75464,7 +75520,7 @@ index af47188..ff84aee 100644
/*
* Need to buffer aliases during bootup until sysfs becomes
-@@ -5302,6 +5349,7 @@ struct saved_alias {
+@@ -5302,6 +5382,7 @@ struct saved_alias {
static struct saved_alias *alias_list;
@@ -75472,7 +75528,7 @@ index af47188..ff84aee 100644
static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
struct saved_alias *al;
-@@ -5324,6 +5372,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
+@@ -5324,6 +5405,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
alias_list = al;
return 0;
}
@@ -75791,42 +75847,6 @@ index eeba3bb..820e22e 100644
if (!vas || !vms)
goto err_free;
-diff --git a/mm/vmscan.c b/mm/vmscan.c
-index fbe2d2c..8342119 100644
---- a/mm/vmscan.c
-+++ b/mm/vmscan.c
-@@ -2824,7 +2824,10 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
- * them before going back to sleep.
- */
- set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
-- schedule();
-+
-+ if (!kthread_should_stop())
-+ schedule();
-+
- set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
- } else {
- if (remaining)
-@@ -3090,14 +3093,17 @@ int kswapd_run(int nid)
- }
-
- /*
-- * Called by memory hotplug when all memory in a node is offlined.
-+ * Called by memory hotplug when all memory in a node is offlined. Caller must
-+ * hold lock_memory_hotplug().
- */
- void kswapd_stop(int nid)
- {
- struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
-
-- if (kswapd)
-+ if (kswapd) {
- kthread_stop(kswapd);
-+ NODE_DATA(nid)->kswapd = NULL;
-+ }
- }
-
- static int __init kswapd_init(void)
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 8fd603b..cf0d930 100644
--- a/mm/vmstat.c
@@ -76538,7 +76558,7 @@ index 68bbf9f..5ef0d12 100644
return err;
diff --git a/net/core/dev.c b/net/core/dev.c
-index 1cbddc9..e52e698 100644
+index 5738654..2078746 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1139,10 +1139,14 @@ void dev_load(struct net *net, const char *name)
@@ -76583,7 +76603,7 @@ index 1cbddc9..e52e698 100644
#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
-@@ -2964,7 +2968,7 @@ enqueue:
+@@ -2943,7 +2947,7 @@ enqueue:
local_irq_restore(flags);
@@ -76592,7 +76612,7 @@ index 1cbddc9..e52e698 100644
kfree_skb(skb);
return NET_RX_DROP;
}
-@@ -3038,7 +3042,7 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -3017,7 +3021,7 @@ int netif_rx_ni(struct sk_buff *skb)
}
EXPORT_SYMBOL(netif_rx_ni);
@@ -76601,7 +76621,7 @@ index 1cbddc9..e52e698 100644
{
struct softnet_data *sd = &__get_cpu_var(softnet_data);
-@@ -3327,7 +3331,7 @@ ncls:
+@@ -3306,7 +3310,7 @@ ncls:
if (pt_prev) {
ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
} else {
@@ -76610,7 +76630,7 @@ index 1cbddc9..e52e698 100644
kfree_skb(skb);
/* Jamal, now you will not able to escape explaining
* me how you were going to use this. :-)
-@@ -3892,7 +3896,7 @@ void netif_napi_del(struct napi_struct *napi)
+@@ -3871,7 +3875,7 @@ void netif_napi_del(struct napi_struct *napi)
}
EXPORT_SYMBOL(netif_napi_del);
@@ -76619,7 +76639,7 @@ index 1cbddc9..e52e698 100644
{
struct softnet_data *sd = &__get_cpu_var(softnet_data);
unsigned long time_limit = jiffies + 2;
-@@ -5918,7 +5922,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
+@@ -5897,7 +5901,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
} else {
netdev_stats_to_stats64(storage, &dev->stats);
}
@@ -77203,18 +77223,18 @@ index 94cdbc5..0cb0063 100644
ts = peer->tcp_ts;
tsage = get_seconds() - peer->tcp_ts_stamp;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
-index 9726927..436489e 100644
+index 32e6ca2..436489e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
-@@ -5836,6 +5836,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+@@ -5836,7 +5836,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
goto discard;
if (th->syn) {
+- if (th->fin)
+ if (th->fin || th->urg || th->psh)
-+ goto discard;
+ goto discard;
if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
return 1;
-
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index de69cec..74908e1 100644
--- a/net/ipv4/tcp_ipv4.c
@@ -77934,10 +77954,10 @@ index 253695d..9481ce8 100644
seq_printf(m, "Max header size: %d\n", self->max_header_size);
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
-index 274d150..656a144 100644
+index cf98d62..7bf2972 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
-@@ -787,10 +787,10 @@ static int iucv_sock_autobind(struct sock *sk)
+@@ -786,10 +786,10 @@ static int iucv_sock_autobind(struct sock *sk)
write_lock_bh(&iucv_sk_list.lock);
@@ -80097,7 +80117,7 @@ index d1d0ae8..6b73b2a 100644
sprintf(alias, "dmi*");
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
-index 619228d..274ce0e 100644
+index 619228d..bf61bbb 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -922,6 +922,7 @@ enum mismatch {
@@ -80139,12 +80159,12 @@ index 619228d..274ce0e 100644
free(prl_to);
break;
+ case DATA_TO_TEXT:
-+/*
++#if 0
+ fprintf(stderr,
-+ "The variable %s references\n"
-+ "the %s %s%s%s\n",
-+ fromsym, to, sec2annotation(tosec), tosym, to_p);
-+*/
++ "The %s %s:%s references\n"
++ "the %s %s:%s%s\n",
++ from, fromsec, fromsym, to, tosec, tosym, to_p);
++#endif
+ break;
}
fprintf(stderr, "\n");
@@ -80256,10 +80276,10 @@ index 38f6617..e70b72b 100755
exuberant()
diff --git a/security/Kconfig b/security/Kconfig
-index 51bd5a0..d660068 100644
+index 51bd5a0..f94ba7f 100644
--- a/security/Kconfig
+++ b/security/Kconfig
-@@ -4,6 +4,861 @@
+@@ -4,6 +4,875 @@
menu "Security options"
@@ -80284,6 +80304,9 @@ index 51bd5a0..d660068 100644
+ bool
+ default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
+
++ config PAX_USERCOPY_SLABS
++ bool
++
+config GRKERNSEC
+ bool "Grsecurity"
+ select CRYPTO
@@ -80518,13 +80541,12 @@ index 51bd5a0..d660068 100644
+ has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
+ support.
+
-+ If you have applications not marked by the PT_PAX_FLAGS ELF program
-+ header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
-+ option otherwise they will not get any protection.
-+
+ Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
+ support as well, they will override the legacy EI_PAX marks.
+
++ If you enable none of the marking options then all applications
++ will run with PaX enabled on them by default.
++
+config PAX_PT_PAX_FLAGS
+ bool 'Use ELF program header marking'
+ default y if GRKERNSEC_CONFIG_AUTO
@@ -80537,15 +80559,14 @@ index 51bd5a0..d660068 100644
+ integrated into the toolchain (the binutils patch is available
+ from http://pax.grsecurity.net).
+
-+ If you have applications not marked by the PT_PAX_FLAGS ELF program
-+ header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
-+ support otherwise they will not get any protection.
++ Note that if you enable the legacy EI_PAX marking support as well,
++ the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
+
+ If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
+ must make sure that the marks are the same if a binary has both marks.
+
-+ Note that if you enable the legacy EI_PAX marking support as well,
-+ the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
++ If you enable none of the marking options then all applications
++ will run with PaX enabled on them by default.
+
+config PAX_XATTR_PAX_FLAGS
+ bool 'Use filesystem extended attributes marking'
@@ -80570,15 +80591,14 @@ index 51bd5a0..d660068 100644
+ isofs, udf, vfat) so copying files through such filesystems will
+ lose the extended attributes and these PaX markings.
+
-+ If you have applications not marked by the PT_PAX_FLAGS ELF program
-+ header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
-+ support otherwise they will not get any protection.
++ Note that if you enable the legacy EI_PAX marking support as well,
++ the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
+
+ If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
+ must make sure that the marks are the same if a binary has both marks.
+
-+ Note that if you enable the legacy EI_PAX marking support as well,
-+ the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
++ If you enable none of the marking options then all applications
++ will run with PaX enabled on them by default.
+
+choice
+ prompt 'MAC system integration'
@@ -81068,6 +81088,7 @@ index 51bd5a0..d660068 100644
+ default y if GRKERNSEC_CONFIG_AUTO
+ depends on X86 || PPC || SPARC || ARM
+ depends on GRKERNSEC && (SLAB || SLUB || SLOB)
++ select PAX_USERCOPY_SLABS
+ help
+ By saying Y here the kernel will enforce the size of heap objects
+ when they are copied in either direction between the kernel and
@@ -81108,6 +81129,19 @@ index 51bd5a0..d660068 100644
+ Homepage:
+ http://www.grsecurity.net/~ephox/overflow_plugin/
+
++config PAX_LATENT_ENTROPY
++ bool "Generate some entropy during boot"
++ default y if GRKERNSEC_CONFIG_AUTO
++ help
++ By saying Y here the kernel will instrument early boot code to
++ extract some entropy from both original and artificially created
++ program state. This will help especially embedded systems where
++ there is little 'natural' source of entropy normally. The cost
++ is some slowdown of the boot process.
++
++ Note that entropy extracted this way is not cryptographically
++ secure!
++
+endmenu
+
+endmenu
@@ -81121,7 +81155,7 @@ index 51bd5a0..d660068 100644
config KEYS
bool "Enable access key retention support"
help
-@@ -169,7 +1024,7 @@ config INTEL_TXT
+@@ -169,7 +1038,7 @@ config INTEL_TXT
config LSM_MMAP_MIN_ADDR
int "Low address space for LSM to protect from user allocation"
depends on SECURITY && SECURITY_SELINUX
@@ -82049,12 +82083,19 @@ index a39edcc..1014050 100644
int last_frame_number; /* stored frame number */
int last_delay; /* stored delay */
};
+diff --git a/tools/gcc/.gitignore b/tools/gcc/.gitignore
+new file mode 100644
+index 0000000..50f2f2f
+--- /dev/null
++++ b/tools/gcc/.gitignore
+@@ -0,0 +1 @@
++size_overflow_hash.h
diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
new file mode 100644
-index 0000000..f4f9986
+index 0000000..1d09b7e
--- /dev/null
+++ b/tools/gcc/Makefile
-@@ -0,0 +1,41 @@
+@@ -0,0 +1,43 @@
+#CC := gcc
+#PLUGIN_SOURCE_FILES := pax_plugin.c
+#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
@@ -82076,6 +82117,7 @@ index 0000000..f4f9986
+$(HOSTLIBS)-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
+$(HOSTLIBS)-y += colorize_plugin.so
+$(HOSTLIBS)-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so
++$(HOSTLIBS)-$(CONFIG_PAX_LATENT_ENTROPY) += latent_entropy_plugin.so
+
+always := $($(HOSTLIBS)-y)
+
@@ -82086,6 +82128,7 @@ index 0000000..f4f9986
+checker_plugin-objs := checker_plugin.o
+colorize_plugin-objs := colorize_plugin.o
+size_overflow_plugin-objs := size_overflow_plugin.o
++latent_entropy_plugin-objs := latent_entropy_plugin.o
+
+$(obj)/size_overflow_plugin.o: $(objtree)/$(obj)/size_overflow_hash.h
+
@@ -82275,7 +82318,7 @@ index 0000000..d41b5af
+}
diff --git a/tools/gcc/colorize_plugin.c b/tools/gcc/colorize_plugin.c
new file mode 100644
-index 0000000..7a5e311
+index 0000000..846aeb0
--- /dev/null
+++ b/tools/gcc/colorize_plugin.c
@@ -0,0 +1,148 @@
@@ -82413,7 +82456,7 @@ index 0000000..7a5e311
+ struct register_pass_info colorize_rearm_pass_info = {
+ .pass = &pass_ipa_colorize_rearm.pass,
+ .reference_pass_name = "*free_lang_data",
-+ .ref_pass_instance_number = 0,
++ .ref_pass_instance_number = 1,
+ .pos_op = PASS_POS_INSERT_AFTER
+ };
+
@@ -82429,7 +82472,7 @@ index 0000000..7a5e311
+}
diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
new file mode 100644
-index 0000000..89b7f56
+index 0000000..048d4ff
--- /dev/null
+++ b/tools/gcc/constify_plugin.c
@@ -0,0 +1,328 @@
@@ -82735,7 +82778,7 @@ index 0000000..89b7f56
+ struct register_pass_info local_variable_pass_info = {
+ .pass = &pass_local_variable.pass,
+ .reference_pass_name = "*referenced_vars",
-+ .ref_pass_instance_number = 0,
++ .ref_pass_instance_number = 1,
+ .pos_op = PASS_POS_INSERT_AFTER
+ };
+
@@ -82863,7 +82906,7 @@ index 0000000..a0fe8b2
+exit 0
diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
new file mode 100644
-index 0000000..a5eabce
+index 0000000..a86e422
--- /dev/null
+++ b/tools/gcc/kallocstat_plugin.c
@@ -0,0 +1,167 @@
@@ -83020,7 +83063,7 @@ index 0000000..a5eabce
+ struct register_pass_info kallocstat_pass_info = {
+ .pass = &kallocstat_pass.pass,
+ .reference_pass_name = "ssa",
-+ .ref_pass_instance_number = 0,
++ .ref_pass_instance_number = 1,
+ .pos_op = PASS_POS_INSERT_AFTER
+ };
+
@@ -83036,7 +83079,7 @@ index 0000000..a5eabce
+}
diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
new file mode 100644
-index 0000000..d8a8da2
+index 0000000..98011fa
--- /dev/null
+++ b/tools/gcc/kernexec_plugin.c
@@ -0,0 +1,427 @@
@@ -83412,19 +83455,19 @@ index 0000000..d8a8da2
+ struct register_pass_info kernexec_reload_pass_info = {
+ .pass = &kernexec_reload_pass.pass,
+ .reference_pass_name = "ssa",
-+ .ref_pass_instance_number = 0,
++ .ref_pass_instance_number = 1,
+ .pos_op = PASS_POS_INSERT_AFTER
+ };
+ struct register_pass_info kernexec_fptr_pass_info = {
+ .pass = &kernexec_fptr_pass.pass,
+ .reference_pass_name = "ssa",
-+ .ref_pass_instance_number = 0,
++ .ref_pass_instance_number = 1,
+ .pos_op = PASS_POS_INSERT_AFTER
+ };
+ struct register_pass_info kernexec_retaddr_pass_info = {
+ .pass = &kernexec_retaddr_pass.pass,
+ .reference_pass_name = "pro_and_epilogue",
-+ .ref_pass_instance_number = 0,
++ .ref_pass_instance_number = 1,
+ .pos_op = PASS_POS_INSERT_AFTER
+ };
+
@@ -83467,6 +83510,307 @@ index 0000000..d8a8da2
+
+ return 0;
+}
+diff --git a/tools/gcc/latent_entropy_plugin.c b/tools/gcc/latent_entropy_plugin.c
+new file mode 100644
+index 0000000..b8008f7
+--- /dev/null
++++ b/tools/gcc/latent_entropy_plugin.c
+@@ -0,0 +1,295 @@
++/*
++ * Copyright 2012 by the PaX Team <pageexec@freemail.hu>
++ * Licensed under the GPL v2
++ *
++ * Note: the choice of the license means that the compilation process is
++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
++ * but for the kernel it doesn't matter since it doesn't link against
++ * any of the gcc libraries
++ *
++ * gcc plugin to help generate a little bit of entropy from program state,
++ * used during boot in the kernel
++ *
++ * TODO:
++ * - add ipa pass to identify not explicitly marked candidate functions
++ * - mix in more program state (function arguments/return values, loop variables, etc)
++ * - more instrumentation control via attribute parameters
++ *
++ * BUGS:
++ * - LTO needs -flto-partition=none for now
++ */
++#include "gcc-plugin.h"
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "flags.h"
++#include "intl.h"
++#include "toplev.h"
++#include "plugin.h"
++//#include "expr.h" where are you...
++#include "diagnostic.h"
++#include "plugin-version.h"
++#include "tm.h"
++#include "function.h"
++#include "basic-block.h"
++#include "gimple.h"
++#include "rtl.h"
++#include "emit-rtl.h"
++#include "tree-flow.h"
++
++int plugin_is_GPL_compatible;
++
++static tree latent_entropy_decl;
++
++static struct plugin_info latent_entropy_plugin_info = {
++ .version = "201207271820",
++ .help = NULL
++};
++
++static unsigned int execute_latent_entropy(void);
++static bool gate_latent_entropy(void);
++
++static struct gimple_opt_pass latent_entropy_pass = {
++ .pass = {
++ .type = GIMPLE_PASS,
++ .name = "latent_entropy",
++ .gate = gate_latent_entropy,
++ .execute = execute_latent_entropy,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++ .tv_id = TV_NONE,
++ .properties_required = PROP_gimple_leh | PROP_cfg,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
++ }
++};
++
++static tree handle_latent_entropy_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
++{
++ if (TREE_CODE(*node) != FUNCTION_DECL) {
++ *no_add_attrs = true;
++ error("%qE attribute only applies to functions", name);
++ }
++ return NULL_TREE;
++}
++
++static struct attribute_spec latent_entropy_attr = {
++ .name = "latent_entropy",
++ .min_length = 0,
++ .max_length = 0,
++ .decl_required = true,
++ .type_required = false,
++ .function_type_required = false,
++ .handler = handle_latent_entropy_attribute,
++#if BUILDING_GCC_VERSION >= 4007
++ .affects_type_identity = false
++#endif
++};
++
++static void register_attributes(void *event_data, void *data)
++{
++ register_attribute(&latent_entropy_attr);
++}
++
++static bool gate_latent_entropy(void)
++{
++ tree latent_entropy_attr;
++
++ latent_entropy_attr = lookup_attribute("latent_entropy", DECL_ATTRIBUTES(current_function_decl));
++ return latent_entropy_attr != NULL_TREE;
++}
++
++static unsigned HOST_WIDE_INT seed;
++static unsigned HOST_WIDE_INT get_random_const(void)
++{
++ seed = (seed >> 1U) ^ (-(seed & 1ULL) & 0xD800000000000000ULL);
++ return seed;
++}
++
++static enum tree_code get_op(tree *rhs)
++{
++ static enum tree_code op;
++ unsigned HOST_WIDE_INT random_const;
++
++ random_const = get_random_const();
++
++ switch (op) {
++ case BIT_XOR_EXPR:
++ op = PLUS_EXPR;
++ break;
++
++ case PLUS_EXPR:
++ if (rhs) {
++ op = LROTATE_EXPR;
++ random_const &= HOST_BITS_PER_WIDE_INT - 1;
++ break;
++ }
++
++ case LROTATE_EXPR:
++ default:
++ op = BIT_XOR_EXPR;
++ break;
++ }
++ if (rhs)
++ *rhs = build_int_cstu(unsigned_intDI_type_node, random_const);
++ return op;
++}
++
++static void perturb_local_entropy(basic_block bb, tree local_entropy)
++{
++ gimple_stmt_iterator gsi;
++ gimple assign;
++ tree addxorrol, rhs;
++ enum tree_code op;
++
++ op = get_op(&rhs);
++ addxorrol = fold_build2_loc(UNKNOWN_LOCATION, op, unsigned_intDI_type_node, local_entropy, rhs);
++ assign = gimple_build_assign(local_entropy, addxorrol);
++ find_referenced_vars_in(assign);
++//debug_bb(bb);
++ gsi = gsi_after_labels(bb);
++ gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
++ update_stmt(assign);
++}
++
++static void perturb_latent_entropy(basic_block bb, tree rhs)
++{
++ gimple_stmt_iterator gsi;
++ gimple assign;
++ tree addxorrol, temp;
++
++ // 1. create temporary copy of latent_entropy
++ temp = create_tmp_var(unsigned_intDI_type_node, "temp_latent_entropy");
++ add_referenced_var(temp);
++ mark_sym_for_renaming(temp);
++
++ // 2. read...
++ assign = gimple_build_assign(temp, latent_entropy_decl);
++ find_referenced_vars_in(assign);
++ gsi = gsi_after_labels(bb);
++ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
++ update_stmt(assign);
++
++ // 3. ...modify...
++ addxorrol = fold_build2_loc(UNKNOWN_LOCATION, get_op(NULL), unsigned_intDI_type_node, temp, rhs);
++ assign = gimple_build_assign(temp, addxorrol);
++ find_referenced_vars_in(assign);
++ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
++ update_stmt(assign);
++
++ // 4. ...write latent_entropy
++ assign = gimple_build_assign(latent_entropy_decl, temp);
++ find_referenced_vars_in(assign);
++ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
++ update_stmt(assign);
++}
++
++static unsigned int execute_latent_entropy(void)
++{
++ basic_block bb;
++ gimple assign;
++ gimple_stmt_iterator gsi;
++ tree local_entropy;
++
++ if (!latent_entropy_decl) {
++ struct varpool_node *node;
++
++ for (node = varpool_nodes; node; node = node->next) {
++ tree var = node->decl;
++ if (strcmp(IDENTIFIER_POINTER(DECL_NAME(var)), "latent_entropy"))
++ continue;
++ latent_entropy_decl = var;
++// debug_tree(var);
++ break;
++ }
++ if (!latent_entropy_decl) {
++// debug_tree(current_function_decl);
++ return 0;
++ }
++ }
++
++//fprintf(stderr, "latent_entropy: %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl)));
++
++ // 1. create local entropy variable
++ local_entropy = create_tmp_var(unsigned_intDI_type_node, "local_entropy");
++ add_referenced_var(local_entropy);
++ mark_sym_for_renaming(local_entropy);
++
++ // 2. initialize local entropy variable
++ bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
++ if (dom_info_available_p(CDI_DOMINATORS))
++ set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
++ gsi = gsi_start_bb(bb);
++
++ assign = gimple_build_assign(local_entropy, build_int_cstu(unsigned_intDI_type_node, get_random_const()));
++// gimple_set_location(assign, loc);
++ find_referenced_vars_in(assign);
++ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
++ update_stmt(assign);
++ bb = bb->next_bb;
++
++ // 3. instrument each BB with an operation on the local entropy variable
++ while (bb != EXIT_BLOCK_PTR) {
++ perturb_local_entropy(bb, local_entropy);
++ bb = bb->next_bb;
++ };
++
++ // 4. mix local entropy into the global entropy variable
++ perturb_latent_entropy(EXIT_BLOCK_PTR->prev_bb, local_entropy);
++ return 0;
++}
++
++static void start_unit_callback(void *gcc_data, void *user_data)
++{
++#if BUILDING_GCC_VERSION >= 4007
++ seed = get_random_seed(false);
++#else
++ sscanf(get_random_seed(false), "%" HOST_WIDE_INT_PRINT "x", &seed);
++ seed *= seed;
++#endif
++
++ if (in_lto_p)
++ return;
++
++ // extern u64 latent_entropy
++ latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, get_identifier("latent_entropy"), unsigned_intDI_type_node);
++
++ TREE_STATIC(latent_entropy_decl) = 1;
++ TREE_PUBLIC(latent_entropy_decl) = 1;
++ TREE_USED(latent_entropy_decl) = 1;
++ TREE_THIS_VOLATILE(latent_entropy_decl) = 1;
++ DECL_EXTERNAL(latent_entropy_decl) = 1;
++ DECL_ARTIFICIAL(latent_entropy_decl) = 0;
++ DECL_INITIAL(latent_entropy_decl) = NULL;
++// DECL_ASSEMBLER_NAME(latent_entropy_decl);
++// varpool_finalize_decl(latent_entropy_decl);
++// varpool_mark_needed_node(latent_entropy_decl);
++}
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++ const char * const plugin_name = plugin_info->base_name;
++ struct register_pass_info latent_entropy_pass_info = {
++ .pass = &latent_entropy_pass.pass,
++ .reference_pass_name = "optimized",
++ .ref_pass_instance_number = 1,
++ .pos_op = PASS_POS_INSERT_BEFORE
++ };
++
++ if (!plugin_default_version_check(version, &gcc_version)) {
++ error(G_("incompatible gcc/plugin versions"));
++ return 1;
++ }
++
++ register_callback(plugin_name, PLUGIN_INFO, NULL, &latent_entropy_plugin_info);
++ register_callback ("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &latent_entropy_pass_info);
++ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
++
++ return 0;
++}
diff --git a/tools/gcc/size_overflow_hash.data b/tools/gcc/size_overflow_hash.data
new file mode 100644
index 0000000..54a12fe
@@ -87077,7 +87421,7 @@ index 0000000..cc96254
+}
diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
new file mode 100644
-index 0000000..b87ec9d
+index 0000000..38d2014
--- /dev/null
+++ b/tools/gcc/stackleak_plugin.c
@@ -0,0 +1,313 @@
@@ -87350,13 +87694,13 @@ index 0000000..b87ec9d
+ .pass = &stackleak_tree_instrument_pass.pass,
+// .reference_pass_name = "tree_profile",
+ .reference_pass_name = "optimized",
-+ .ref_pass_instance_number = 0,
++ .ref_pass_instance_number = 1,
+ .pos_op = PASS_POS_INSERT_BEFORE
+ };
+ struct register_pass_info stackleak_final_pass_info = {
+ .pass = &stackleak_final_rtl_opt_pass.pass,
+ .reference_pass_name = "final",
-+ .ref_pass_instance_number = 0,
++ .ref_pass_instance_number = 1,
+ .pos_op = PASS_POS_INSERT_BEFORE
+ };
+
diff --git a/3.2.23/4430_grsec-remove-localversion-grsec.patch b/3.2.24/4430_grsec-remove-localversion-grsec.patch
index 31cf878..31cf878 100644
--- a/3.2.23/4430_grsec-remove-localversion-grsec.patch
+++ b/3.2.24/4430_grsec-remove-localversion-grsec.patch
diff --git a/3.2.23/4435_grsec-mute-warnings.patch b/3.2.24/4435_grsec-mute-warnings.patch
index e85abd6..e85abd6 100644
--- a/3.2.23/4435_grsec-mute-warnings.patch
+++ b/3.2.24/4435_grsec-mute-warnings.patch
diff --git a/3.2.23/4440_grsec-remove-protected-paths.patch b/3.2.24/4440_grsec-remove-protected-paths.patch
index 637934a..637934a 100644
--- a/3.2.23/4440_grsec-remove-protected-paths.patch
+++ b/3.2.24/4440_grsec-remove-protected-paths.patch
diff --git a/3.2.23/4450_grsec-kconfig-default-gids.patch b/3.2.24/4450_grsec-kconfig-default-gids.patch
index 0ab1250..0ab1250 100644
--- a/3.2.23/4450_grsec-kconfig-default-gids.patch
+++ b/3.2.24/4450_grsec-kconfig-default-gids.patch
diff --git a/3.2.23/4465_selinux-avc_audit-log-curr_ip.patch b/3.2.24/4465_selinux-avc_audit-log-curr_ip.patch
index 48acad7..48acad7 100644
--- a/3.2.23/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/3.2.24/4465_selinux-avc_audit-log-curr_ip.patch
diff --git a/3.2.23/4470_disable-compat_vdso.patch b/3.2.24/4470_disable-compat_vdso.patch
index 4742d01..4742d01 100644
--- a/3.2.23/4470_disable-compat_vdso.patch
+++ b/3.2.24/4470_disable-compat_vdso.patch
diff --git a/3.4.6/0000_README b/3.4.6/0000_README
index 9b230b9..0a9e8d9 100644
--- a/3.4.6/0000_README
+++ b/3.4.6/0000_README
@@ -6,7 +6,7 @@ Patch: 1005_linux-3.4.6.patch
From: http://www.kernel.org
Desc: Linux 3.4.6
-Patch: 4420_grsecurity-2.9.1-3.4.6-201207242237.patch
+Patch: 4420_grsecurity-2.9.1-3.4.6-201207281946.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.4.6/4420_grsecurity-2.9.1-3.4.6-201207242237.patch b/3.4.6/4420_grsecurity-2.9.1-3.4.6-201207281946.patch
index 0f5d8af..357f472 100644
--- a/3.4.6/4420_grsecurity-2.9.1-3.4.6-201207242237.patch
+++ b/3.4.6/4420_grsecurity-2.9.1-3.4.6-201207281946.patch
@@ -236,7 +236,7 @@ index c1601e5..08557ce 100644
pcd. [PARIDE]
diff --git a/Makefile b/Makefile
-index 5d0edcb..121c424 100644
+index 5d0edcb..f69ee4c 100644
--- a/Makefile
+++ b/Makefile
@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -296,11 +296,11 @@ index 5d0edcb..121c424 100644
+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
+endif
+ifdef CONFIG_PAX_LATENT_ENTROPY
-+LATENT_ENTROPY := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so
++LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
+endif
+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
-+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY)
++GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS)
+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
+export PLUGINCC CONSTIFY_PLUGIN
+ifeq ($(KBUILD_EXTMOD),)
@@ -6527,7 +6527,7 @@ index 301421c..e2535d1 100644
obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
obj-y += fault_$(BITS).o
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
-index df3155a..eb708b8 100644
+index df3155a..b6e32fa 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -21,6 +21,9 @@
@@ -6540,7 +6540,7 @@ index df3155a..eb708b8 100644
#include <asm/page.h>
#include <asm/pgtable.h>
-@@ -207,6 +210,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
+@@ -207,6 +210,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
return safe_compute_effective_address(regs, insn);
}
@@ -6631,40 +6631,49 @@ index df3155a..eb708b8 100644
+ }
+ } while (0);
+
-+ { /* PaX: patched PLT emulation #2 */
++ do { /* PaX: patched PLT emulation #2 */
+ unsigned int ba;
+
+ err = get_user(ba, (unsigned int *)regs->pc);
+
-+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
++ if (err)
++ break;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
+ unsigned int addr;
+
-+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++ else
++ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
+ regs->pc = addr;
+ regs->npc = addr+4;
+ return 2;
+ }
-+ }
++ } while (0);
+
+ do { /* PaX: patched PLT emulation #3 */
-+ unsigned int sethi, jmpl, nop;
++ unsigned int sethi, bajmpl, nop;
+
+ err = get_user(sethi, (unsigned int *)regs->pc);
-+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
++ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
+
+ if (err)
+ break;
+
+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
-+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
++ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
+ nop == 0x01000000U)
+ {
+ unsigned int addr;
+
+ addr = (sethi & 0x003FFFFFU) << 10;
+ regs->u_regs[UREG_G1] = addr;
-+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ else
++ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
+ regs->pc = addr;
+ regs->npc = addr+4;
+ return 2;
@@ -6809,7 +6818,7 @@ index df3155a..eb708b8 100644
static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
int text_fault)
{
-@@ -282,6 +547,24 @@ good_area:
+@@ -282,6 +556,24 @@ good_area:
if(!(vma->vm_flags & VM_WRITE))
goto bad_area;
} else {
@@ -6835,7 +6844,7 @@ index df3155a..eb708b8 100644
if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
-index 1fe0429..aee2e87 100644
+index 1fe0429..8dd5dd5 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -21,6 +21,9 @@
@@ -6857,7 +6866,7 @@ index 1fe0429..aee2e87 100644
printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
dump_stack();
unhandled_fault(regs->tpc, current, regs);
-@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
+@@ -272,6 +275,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
show_regs(regs);
}
@@ -6952,15 +6961,21 @@ index 1fe0429..aee2e87 100644
+ }
+ } while (0);
+
-+ { /* PaX: patched PLT emulation #2 */
++ do { /* PaX: patched PLT emulation #2 */
+ unsigned int ba;
+
+ err = get_user(ba, (unsigned int *)regs->tpc);
+
-+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
++ if (err)
++ break;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
+ unsigned long addr;
+
-+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++ else
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
+
+ if (test_thread_flag(TIF_32BIT))
+ addr &= 0xFFFFFFFFUL;
@@ -6969,27 +6984,30 @@ index 1fe0429..aee2e87 100644
+ regs->tnpc = addr+4;
+ return 2;
+ }
-+ }
++ } while (0);
+
+ do { /* PaX: patched PLT emulation #3 */
-+ unsigned int sethi, jmpl, nop;
++ unsigned int sethi, bajmpl, nop;
+
+ err = get_user(sethi, (unsigned int *)regs->tpc);
-+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
++ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
+
+ if (err)
+ break;
+
+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
-+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
++ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
+ nop == 0x01000000U)
+ {
+ unsigned long addr;
+
+ addr = (sethi & 0x003FFFFFU) << 10;
+ regs->u_regs[UREG_G1] = addr;
-+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
++ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++ else
++ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
+
+ if (test_thread_flag(TIF_32BIT))
+ addr &= 0xFFFFFFFFUL;
@@ -7315,7 +7333,7 @@ index 1fe0429..aee2e87 100644
asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
{
struct mm_struct *mm = current->mm;
-@@ -343,6 +797,29 @@ retry:
+@@ -343,6 +806,29 @@ retry:
if (!vma)
goto bad_area;
@@ -59419,7 +59437,7 @@ index f1c8ca6..b5c1cc7 100644
#define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
-index b7babf0..c1e2d45 100644
+index b7babf0..3ba8aee 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -22,6 +22,12 @@
@@ -59672,7 +59690,7 @@ index b7babf0..c1e2d45 100644
static inline long atomic_long_dec_return(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
-@@ -255,4 +393,53 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
+@@ -255,4 +393,55 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
#endif /* BITS_PER_LONG == 64 */
@@ -59690,8 +59708,10 @@ index b7babf0..c1e2d45 100644
+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
++#ifdef CONFIG_X86
+ atomic_clear_mask_unchecked(0, NULL);
+ atomic_set_mask_unchecked(0, NULL);
++#endif
+
+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
@@ -60204,10 +60224,10 @@ index 42e55de..1cd0e66 100644
extern struct cleancache_ops
cleancache_register_ops(struct cleancache_ops *ops);
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
-index 2f40791..a62d196 100644
+index 2f40791..9c9e13c 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
-@@ -32,6 +32,16 @@
+@@ -32,6 +32,20 @@
#define __linktime_error(message) __attribute__((__error__(message)))
#if __GNUC_MINOR__ >= 5
@@ -60221,10 +60241,14 @@ index 2f40791..a62d196 100644
+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
+#endif
+
++#ifdef LATENT_ENTROPY_PLUGIN
++#define __latent_entropy __attribute__((latent_entropy))
++#endif
++
/*
* Mark a position in code as unreachable. This can be used to
* suppress control flow warnings after asm blocks that transfer
-@@ -47,6 +57,11 @@
+@@ -47,6 +61,11 @@
#define __noclone __attribute__((__noclone__))
#endif
@@ -60237,7 +60261,7 @@ index 2f40791..a62d196 100644
#if __GNUC_MINOR__ > 0
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
-index 923d093..726c17f 100644
+index 923d093..1fef491 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -5,31 +5,62 @@
@@ -60313,7 +60337,7 @@ index 923d093..726c17f 100644
#endif
#ifdef __KERNEL__
-@@ -264,6 +297,18 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+@@ -264,6 +297,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
# define __attribute_const__ /* unimplemented */
#endif
@@ -60329,10 +60353,14 @@ index 923d093..726c17f 100644
+# define __size_overflow(...)
+#endif
+
++#ifndef __latent_entropy
++# define __latent_entropy
++#endif
++
/*
* Tell gcc if a function is cold. The compiler will assume any path
* directly leading to the call is unlikely.
-@@ -273,6 +318,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+@@ -273,6 +322,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
#define __cold
#endif
@@ -60355,7 +60383,7 @@ index 923d093..726c17f 100644
/* Simple shorthand for a section definition */
#ifndef __section
# define __section(S) __attribute__ ((__section__(#S)))
-@@ -308,6 +369,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+@@ -308,6 +373,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
* use is to mediate communication between process-level code and irq/NMI
* handlers, all running on the same CPU.
*/
@@ -61887,10 +61915,54 @@ index 58404b0..439ed95 100644
};
diff --git a/include/linux/init.h b/include/linux/init.h
-index 6b95109..4aca62c 100644
+index 6b95109..bcbdd68 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
-@@ -294,13 +294,13 @@ void __init parse_early_options(char *cmdline);
+@@ -39,9 +39,15 @@
+ * Also note, that this data cannot be "const".
+ */
+
++#ifdef MODULE
++#define add_latent_entropy
++#else
++#define add_latent_entropy __latent_entropy
++#endif
++
+ /* These are for everybody (although not all archs will actually
+ discard it in modules) */
+-#define __init __section(.init.text) __cold notrace
++#define __init __section(.init.text) __cold notrace add_latent_entropy
+ #define __initdata __section(.init.data)
+ #define __initconst __section(.init.rodata)
+ #define __exitdata __section(.exit.data)
+@@ -83,7 +89,7 @@
+ #define __exit __section(.exit.text) __exitused __cold notrace
+
+ /* Used for HOTPLUG */
+-#define __devinit __section(.devinit.text) __cold notrace
++#define __devinit __section(.devinit.text) __cold notrace add_latent_entropy
+ #define __devinitdata __section(.devinit.data)
+ #define __devinitconst __section(.devinit.rodata)
+ #define __devexit __section(.devexit.text) __exitused __cold notrace
+@@ -91,7 +97,7 @@
+ #define __devexitconst __section(.devexit.rodata)
+
+ /* Used for HOTPLUG_CPU */
+-#define __cpuinit __section(.cpuinit.text) __cold notrace
++#define __cpuinit __section(.cpuinit.text) __cold notrace add_latent_entropy
+ #define __cpuinitdata __section(.cpuinit.data)
+ #define __cpuinitconst __section(.cpuinit.rodata)
+ #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
+@@ -99,7 +105,7 @@
+ #define __cpuexitconst __section(.cpuexit.rodata)
+
+ /* Used for MEMORY_HOTPLUG */
+-#define __meminit __section(.meminit.text) __cold notrace
++#define __meminit __section(.meminit.text) __cold notrace add_latent_entropy
+ #define __meminitdata __section(.meminit.data)
+ #define __meminitconst __section(.meminit.rodata)
+ #define __memexit __section(.memexit.text) __exitused __cold notrace
+@@ -294,13 +300,13 @@ void __init parse_early_options(char *cmdline);
/* Each module must use one module_init(). */
#define module_init(initfn) \
@@ -71262,7 +71334,7 @@ index bf5b485..e44c2cb 100644
capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
diff --git a/mm/mlock.c b/mm/mlock.c
-index ef726e8..13e0901 100644
+index ef726e8..cd7f1ec 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -13,6 +13,7 @@
@@ -71273,6 +71345,15 @@ index ef726e8..13e0901 100644
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>
+@@ -376,7 +377,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
+ {
+ unsigned long nstart, end, tmp;
+ struct vm_area_struct * vma, * prev;
+- int error;
++ int error = 0;
+
+ VM_BUG_ON(start & ~PAGE_MASK);
+ VM_BUG_ON(len != PAGE_ALIGN(len));
@@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
return -EINVAL;
if (end == start)
@@ -78607,10 +78688,10 @@ index 5c11312..72742b5 100644
write_hex_cnt = 0;
for (i = 0; i < logo_clutsize; i++) {
diff --git a/security/Kconfig b/security/Kconfig
-index ccc61f8..a2bd35c 100644
+index ccc61f8..5e68d73 100644
--- a/security/Kconfig
+++ b/security/Kconfig
-@@ -4,6 +4,874 @@
+@@ -4,6 +4,875 @@
menu "Security options"
@@ -79462,6 +79543,7 @@ index ccc61f8..a2bd35c 100644
+
+config PAX_LATENT_ENTROPY
+ bool "Generate some entropy during boot"
++ default y if GRKERNSEC_CONFIG_AUTO
+ help
+ By saying Y here the kernel will instrument early boot code to
+ extract some entropy from both original and artificially created
@@ -79485,7 +79567,7 @@ index ccc61f8..a2bd35c 100644
config KEYS
bool "Enable access key retention support"
help
-@@ -169,7 +1037,7 @@ config INTEL_TXT
+@@ -169,7 +1038,7 @@ config INTEL_TXT
config LSM_MMAP_MIN_ADDR
int "Low address space for LSM to protect from user allocation"
depends on SECURITY && SECURITY_SELINUX
@@ -80358,7 +80440,7 @@ index 0000000..50f2f2f
+size_overflow_hash.h
diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
new file mode 100644
-index 0000000..e9d4079
+index 0000000..1d09b7e
--- /dev/null
+++ b/tools/gcc/Makefile
@@ -0,0 +1,43 @@
@@ -80370,10 +80452,10 @@ index 0000000..e9d4079
+
+ifeq ($(PLUGINCC),$(HOSTCC))
+HOSTLIBS := hostlibs
-+HOST_EXTRACFLAGS += -Iinclude -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu99 -ggdb
++HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu99 -ggdb
+else
+HOSTLIBS := hostcxxlibs
-+HOST_EXTRACXXFLAGS += -Iinclude -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu++98 -ggdb -Wno-unused-parameter
++HOST_EXTRACXXFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu++98 -ggdb -Wno-unused-parameter
+endif
+
+$(HOSTLIBS)-y := constify_plugin.so
@@ -81778,10 +81860,10 @@ index 0000000..98011fa
+}
diff --git a/tools/gcc/latent_entropy_plugin.c b/tools/gcc/latent_entropy_plugin.c
new file mode 100644
-index 0000000..9788bfe
+index 0000000..b8008f7
--- /dev/null
+++ b/tools/gcc/latent_entropy_plugin.c
-@@ -0,0 +1,291 @@
+@@ -0,0 +1,295 @@
+/*
+ * Copyright 2012 by the PaX Team <pageexec@freemail.hu>
+ * Licensed under the GPL v2
@@ -81795,10 +81877,12 @@ index 0000000..9788bfe
+ * used during boot in the kernel
+ *
+ * TODO:
-+ * - quite a few, see the comments :)
++ * - add ipa pass to identify not explicitly marked candidate functions
++ * - mix in more program state (function arguments/return values, loop variables, etc)
++ * - more instrumentation control via attribute parameters
+ *
+ * BUGS:
-+ * - none known
++ * - LTO needs -flto-partition=none for now
+ */
+#include "gcc-plugin.h"
+#include "config.h"
@@ -81820,17 +81904,13 @@ index 0000000..9788bfe
+#include "rtl.h"
+#include "emit-rtl.h"
+#include "tree-flow.h"
-+#include "cpplib.h"
-+#include "c-pragma.h"
-+
-+#include "linux/kconfig.h"
+
+int plugin_is_GPL_compatible;
+
+static tree latent_entropy_decl;
+
+static struct plugin_info latent_entropy_plugin_info = {
-+ .version = "201207202140",
++ .version = "201207271820",
+ .help = NULL
+};
+
@@ -81855,54 +81935,39 @@ index 0000000..9788bfe
+ }
+};
+
-+// for kernel use we just want to instrument some of the boot code
-+// for userland use this would need changes
-+static bool gate_latent_entropy(void)
++static tree handle_latent_entropy_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
+{
-+ tree section_attr;
-+ const char *section_name;
-+
-+ // don't instrument modules
-+ if (cpp_defined(parse_in, (const unsigned char *)"MODULE", 6))
-+ return false;
-+
-+ // don't instrument normal code
-+ section_attr = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
-+ if (!section_attr || !TREE_VALUE(section_attr))
-+ return false;
-+
-+ section_name = TREE_STRING_POINTER(TREE_VALUE(TREE_VALUE(section_attr)));
-+
-+ // instrument code in boot related sections
-+ if (!strncmp(section_name, ".init.text", 10))
-+ return true;
-+
-+ if (!strncmp(section_name, ".initcall", 9))
-+ return true;
-+
-+ if (!strncmp(section_name, ".con_initcall", 13))
-+ return true;
-+
-+ if (!strncmp(section_name, ".security_initcall", 18))
-+ return true;
++ if (TREE_CODE(*node) != FUNCTION_DECL) {
++ *no_add_attrs = true;
++ error("%qE attribute only applies to functions", name);
++ }
++ return NULL_TREE;
++}
+
-+#ifndef CONFIG_HOTPLUG
-+ if (!strncmp(section_name, ".devinit.text", 13))
-+ return true;
++static struct attribute_spec latent_entropy_attr = {
++ .name = "latent_entropy",
++ .min_length = 0,
++ .max_length = 0,
++ .decl_required = true,
++ .type_required = false,
++ .function_type_required = false,
++ .handler = handle_latent_entropy_attribute,
++#if BUILDING_GCC_VERSION >= 4007
++ .affects_type_identity = false
+#endif
++};
+
-+#ifndef CONFIG_HOTPLUG_CPU
-+ if (!strncmp(section_name, ".cpuinit.text", 13))
-+ return true;
-+#endif
++static void register_attributes(void *event_data, void *data)
++{
++ register_attribute(&latent_entropy_attr);
++}
+
-+#ifndef CONFIG_HOTPLUG_MEMORY
-+ if (!strncmp(section_name, ".meminit.text", 13))
-+ return true;
-+#endif
++static bool gate_latent_entropy(void)
++{
++ tree latent_entropy_attr;
+
-+ // TODO check whether cfun is static and all its callers meet the above criteria
-+ return false;
++ latent_entropy_attr = lookup_attribute("latent_entropy", DECL_ATTRIBUTES(current_function_decl));
++ return latent_entropy_attr != NULL_TREE;
+}
+
+static unsigned HOST_WIDE_INT seed;
@@ -81988,8 +82053,6 @@ index 0000000..9788bfe
+ find_referenced_vars_in(assign);
+ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
+ update_stmt(assign);
-+
-+ // TODO we could mix in more local state such as function return values, etc
+}
+
+static unsigned int execute_latent_entropy(void)
@@ -81999,6 +82062,23 @@ index 0000000..9788bfe
+ gimple_stmt_iterator gsi;
+ tree local_entropy;
+
++ if (!latent_entropy_decl) {
++ struct varpool_node *node;
++
++ for (node = varpool_nodes; node; node = node->next) {
++ tree var = node->decl;
++ if (strcmp(IDENTIFIER_POINTER(DECL_NAME(var)), "latent_entropy"))
++ continue;
++ latent_entropy_decl = var;
++// debug_tree(var);
++ break;
++ }
++ if (!latent_entropy_decl) {
++// debug_tree(current_function_decl);
++ return 0;
++ }
++ }
++
+//fprintf(stderr, "latent_entropy: %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl)));
+
+ // 1. create local entropy variable
@@ -82032,24 +82112,29 @@ index 0000000..9788bfe
+
+static void start_unit_callback(void *gcc_data, void *user_data)
+{
++#if BUILDING_GCC_VERSION >= 4007
++ seed = get_random_seed(false);
++#else
++ sscanf(get_random_seed(false), "%" HOST_WIDE_INT_PRINT "x", &seed);
++ seed *= seed;
++#endif
++
++ if (in_lto_p)
++ return;
++
+ // extern u64 latent_entropy
+ latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, get_identifier("latent_entropy"), unsigned_intDI_type_node);
+
+ TREE_STATIC(latent_entropy_decl) = 1;
+ TREE_PUBLIC(latent_entropy_decl) = 1;
++ TREE_USED(latent_entropy_decl) = 1;
++ TREE_THIS_VOLATILE(latent_entropy_decl) = 1;
+ DECL_EXTERNAL(latent_entropy_decl) = 1;
-+ DECL_ARTIFICIAL(latent_entropy_decl) = 1;
++ DECL_ARTIFICIAL(latent_entropy_decl) = 0;
+ DECL_INITIAL(latent_entropy_decl) = NULL;
+// DECL_ASSEMBLER_NAME(latent_entropy_decl);
+// varpool_finalize_decl(latent_entropy_decl);
+// varpool_mark_needed_node(latent_entropy_decl);
-+
-+#if BUILDING_GCC_VERSION >= 4007
-+ seed = get_random_seed(false);
-+#else
-+ sscanf(get_random_seed(false), "%" HOST_WIDE_INT_PRINT "x", &seed);
-+ seed *= seed;
-+#endif
+}
+
+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
@@ -82070,6 +82155,7 @@ index 0000000..9788bfe
+ register_callback(plugin_name, PLUGIN_INFO, NULL, &latent_entropy_plugin_info);
+ register_callback ("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &latent_entropy_pass_info);
++ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
+
+ return 0;
+}