author     Anthony G. Basile <blueness@gentoo.org>  2012-01-13 11:05:10 -0500
committer  Anthony G. Basile <blueness@gentoo.org>  2012-01-13 11:05:10 -0500
commit     e90ba120d58d3cc38524f38fcfba78c599714cbb (patch)
tree       7cc67c2338fe82bf61ed97ca536c676899b5a3eb
parent     Removed version bump patches which are now in genpatches (diff)
download   hardened-patchset-e90ba120d58d3cc38524f38fcfba78c599714cbb.tar.gz
           hardened-patchset-e90ba120d58d3cc38524f38fcfba78c599714cbb.tar.bz2
           hardened-patchset-e90ba120d58d3cc38524f38fcfba78c599714cbb.zip
Grsec/PaX: 2.2.2-2.6.32.53-201201101724 + 2.2.2-3.1.8-201201111906
-rw-r--r--  2.6.32/0000_README | 6
-rw-r--r--  2.6.32/1052_linux-2.6.32.53.patch | 200
-rw-r--r--  2.6.32/4420_grsecurity-2.2.2-2.6.32.53-201201101724.patch (renamed from 2.6.32/4420_grsecurity-2.2.2-2.6.32.52-201201031758.patch) | 679
-rw-r--r--  2.6.32/4425_grsec-pax-without-grsec.patch | 6
-rw-r--r--  2.6.32/4435_grsec-kconfig-gentoo.patch | 6
-rw-r--r--  3.1.8/0000_README (renamed from 3.1.7/0000_README) | 6
-rw-r--r--  3.1.8/1007_linux-3.1.8.patch | 4111
-rw-r--r--  3.1.8/4420_grsecurity-2.2.2-3.1.8-201201111906.patch (renamed from 3.1.7/4420_grsecurity-2.2.2-3.1.7-201201032037.patch) | 1115
-rw-r--r--  3.1.8/4421_grsec-remove-localversion-grsec.patch (renamed from 3.1.7/4421_grsec-remove-localversion-grsec.patch) | 0
-rw-r--r--  3.1.8/4422_grsec-mute-warnings.patch (renamed from 3.1.7/4422_grsec-mute-warnings.patch) | 0
-rw-r--r--  3.1.8/4423_grsec-remove-protected-paths.patch (renamed from 3.1.7/4423_grsec-remove-protected-paths.patch) | 0
-rw-r--r--  3.1.8/4425_grsec-pax-without-grsec.patch (renamed from 3.1.7/4425_grsec-pax-without-grsec.patch) | 6
-rw-r--r--  3.1.8/4430_grsec-kconfig-default-gids.patch (renamed from 3.1.7/4430_grsec-kconfig-default-gids.patch) | 0
-rw-r--r--  3.1.8/4435_grsec-kconfig-gentoo.patch (renamed from 3.1.7/4435_grsec-kconfig-gentoo.patch) | 6
-rw-r--r--  3.1.8/4437-grsec-kconfig-proc-user.patch (renamed from 3.1.7/4437-grsec-kconfig-proc-user.patch) | 0
-rw-r--r--  3.1.8/4440_selinux-avc_audit-log-curr_ip.patch (renamed from 3.1.7/4440_selinux-avc_audit-log-curr_ip.patch) | 0
-rw-r--r--  3.1.8/4445_disable-compat_vdso.patch (renamed from 3.1.7/4445_disable-compat_vdso.patch) | 0
17 files changed, 5539 insertions, 602 deletions
diff --git a/2.6.32/0000_README b/2.6.32/0000_README
index 860515f..3369bb1 100644
--- a/2.6.32/0000_README
+++ b/2.6.32/0000_README
@@ -2,7 +2,11 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-2.2.2-2.6.32.52-201201031758.patch
+Patch: 1052_linux-2.6.32.53.patch
+From: http://www.kernel.org
+Desc: Linux 2.6.32.53
+
+Patch: 4420_grsecurity-2.2.2-2.6.32.53-201201101724.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/2.6.32/1052_linux-2.6.32.53.patch b/2.6.32/1052_linux-2.6.32.53.patch
new file mode 100644
index 0000000..4e00776
--- /dev/null
+++ b/2.6.32/1052_linux-2.6.32.53.patch
@@ -0,0 +1,200 @@
+diff --git a/arch/arm/plat-mxc/pwm.c b/arch/arm/plat-mxc/pwm.c
+index 5cdbd60..1640486 100644
+--- a/arch/arm/plat-mxc/pwm.c
++++ b/arch/arm/plat-mxc/pwm.c
+@@ -31,6 +31,9 @@
+ #define MX3_PWMSAR 0x0C /* PWM Sample Register */
+ #define MX3_PWMPR 0x10 /* PWM Period Register */
+ #define MX3_PWMCR_PRESCALER(x) (((x - 1) & 0xFFF) << 4)
++#define MX3_PWMCR_DOZEEN (1 << 24)
++#define MX3_PWMCR_WAITEN (1 << 23)
++#define MX3_PWMCR_DBGEN (1 << 22)
+ #define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16)
+ #define MX3_PWMCR_CLKSRC_IPG (1 << 16)
+ #define MX3_PWMCR_EN (1 << 0)
+@@ -73,10 +76,21 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
+ do_div(c, period_ns);
+ duty_cycles = c;
+
++ /*
++ * according to imx pwm RM, the real period value should be
++ * PERIOD value in PWMPR plus 2.
++ */
++ if (period_cycles > 2)
++ period_cycles -= 2;
++ else
++ period_cycles = 0;
++
+ writel(duty_cycles, pwm->mmio_base + MX3_PWMSAR);
+ writel(period_cycles, pwm->mmio_base + MX3_PWMPR);
+
+- cr = MX3_PWMCR_PRESCALER(prescale) | MX3_PWMCR_EN;
++ cr = MX3_PWMCR_PRESCALER(prescale) |
++ MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
++ MX3_PWMCR_DBGEN | MX3_PWMCR_EN;
+
+ if (cpu_is_mx25())
+ cr |= MX3_PWMCR_CLKSRC_IPG;
+diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
+index 1c9fba6..e5c77d8 100644
+--- a/block/cfq-iosched.c
++++ b/block/cfq-iosched.c
+@@ -1981,7 +1981,7 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
+ }
+ }
+
+- if (ret)
++ if (ret && ret != -EEXIST)
+ printk(KERN_ERR "cfq: cic link failed!\n");
+
+ return ret;
+@@ -1997,6 +1997,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
+ {
+ struct io_context *ioc = NULL;
+ struct cfq_io_context *cic;
++ int ret;
+
+ might_sleep_if(gfp_mask & __GFP_WAIT);
+
+@@ -2004,6 +2005,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
+ if (!ioc)
+ return NULL;
+
++retry:
+ cic = cfq_cic_lookup(cfqd, ioc);
+ if (cic)
+ goto out;
+@@ -2012,7 +2014,12 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
+ if (cic == NULL)
+ goto err;
+
+- if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
++ ret = cfq_cic_link(cfqd, ioc, cic, gfp_mask);
++ if (ret == -EEXIST) {
++ /* someone has linked cic to ioc already */
++ cfq_cic_free(cic);
++ goto retry;
++ } else if (ret)
+ goto err_free;
+
+ out:
+diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
+index 705a589..68d800f 100644
+--- a/drivers/mmc/host/mmci.c
++++ b/drivers/mmc/host/mmci.c
+@@ -232,8 +232,12 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
+ }
+
+ if (!cmd->data || cmd->error) {
+- if (host->data)
++ if (host->data) {
++ /* Terminate the DMA transfer */
++ if (dma_inprogress(host))
++ mmci_dma_data_error(host);
+ mmci_stop_data(host);
++ }
+ mmci_request_end(host, cmd->mrq);
+ } else if (!(cmd->data->flags & MMC_DATA_READ)) {
+ mmci_start_data(host, cmd->data);
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index 54e716a..31be89b 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -2472,6 +2472,9 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw,
+ struct ath_wiphy *aphy = hw->priv;
+ struct ath_softc *sc = aphy->sc;
+
++ if (!(sc->sc_flags & SC_OP_TXAGGR))
++ return;
++
+ switch (cmd) {
+ case STA_NOTIFY_ADD:
+ ath_node_attach(sc, sta);
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+index cb972b6..11253d9 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+@@ -3145,7 +3145,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+ /* insert into event log */
+ sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
+ sizeof(Mpi2EventDataSasDeviceStatusChange_t);
+- event_reply = kzalloc(sz, GFP_KERNEL);
++ event_reply = kzalloc(sz, GFP_ATOMIC);
+ if (!event_reply) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
+index 0b91907..2a9f54a 100644
+--- a/drivers/watchdog/hpwdt.c
++++ b/drivers/watchdog/hpwdt.c
+@@ -220,6 +220,7 @@ static int __devinit cru_detect(unsigned long map_entry,
+
+ cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE;
+
++ set_memory_x((unsigned long)bios32_entrypoint, (2 * PAGE_SIZE));
+ asminline_call(&cmn_regs, bios32_entrypoint);
+
+ if (cmn_regs.u1.ral != 0) {
+@@ -237,8 +238,10 @@ static int __devinit cru_detect(unsigned long map_entry,
+ if ((physical_bios_base + physical_bios_offset)) {
+ cru_rom_addr =
+ ioremap(cru_physical_address, cru_length);
+- if (cru_rom_addr)
++ if (cru_rom_addr) {
++ set_memory_x((unsigned long)cru_rom_addr, cru_length);
+ retval = 0;
++ }
+ }
+
+ printk(KERN_DEBUG "hpwdt: CRU Base Address: 0x%lx\n",
+diff --git a/kernel/hung_task.c b/kernel/hung_task.c
+index d4e84174..07187ae 100644
+--- a/kernel/hung_task.c
++++ b/kernel/hung_task.c
+@@ -74,11 +74,17 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
+
+ /*
+ * Ensure the task is not frozen.
+- * Also, when a freshly created task is scheduled once, changes
+- * its state to TASK_UNINTERRUPTIBLE without having ever been
+- * switched out once, it musn't be checked.
++ * Also, skip vfork and any other user process that freezer should skip.
+ */
+- if (unlikely(t->flags & PF_FROZEN || !switch_count))
++ if (unlikely(t->flags & (PF_FROZEN | PF_FREEZER_SKIP)))
++ return;
++
++ /*
++ * When a freshly created task is scheduled once, changes its state to
++ * TASK_UNINTERRUPTIBLE without having ever been switched out once, it
++ * musn't be checked.
++ */
++ if (unlikely(!switch_count))
+ return;
+
+ if (switch_count != t->last_switch_count) {
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 9e0826e..a1fe378 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -1675,7 +1675,7 @@ repeat:
+ page = __page_cache_alloc(gfp | __GFP_COLD);
+ if (!page)
+ return ERR_PTR(-ENOMEM);
+- err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
++ err = add_to_page_cache_lru(page, mapping, index, gfp);
+ if (unlikely(err)) {
+ page_cache_release(page);
+ if (err == -EEXIST)
+@@ -1772,10 +1772,7 @@ static struct page *wait_on_page_read(struct page *page)
+ * @gfp: the page allocator flags to use if allocating
+ *
+ * This is the same as "read_mapping_page(mapping, index, NULL)", but with
+- * any new page allocations done using the specified allocation flags. Note
+- * that the Radix tree operations will still use GFP_KERNEL, so you can't
+- * expect to do this atomically or anything like that - but you can pass in
+- * other page requirements.
++ * any new page allocations done using the specified allocation flags.
+ *
+ * If the page does not get brought uptodate, return -EIO.
+ */
diff --git a/2.6.32/4420_grsecurity-2.2.2-2.6.32.52-201201031758.patch b/2.6.32/4420_grsecurity-2.2.2-2.6.32.53-201201101724.patch
index 40137ff..c8f7239 100644
--- a/2.6.32/4420_grsecurity-2.2.2-2.6.32.52-201201031758.patch
+++ b/2.6.32/4420_grsecurity-2.2.2-2.6.32.53-201201101724.patch
@@ -185,7 +185,7 @@ index c840e7d..f4c451c 100644
pcd. [PARIDE]
diff --git a/Makefile b/Makefile
-index 8f775f5..272691c 100644
+index 8472e43..c5792be 100644
--- a/Makefile
+++ b/Makefile
@@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -220,7 +220,7 @@ index 8f775f5..272691c 100644
include/linux/version.h headers_% \
kernelrelease kernelversion
-@@ -526,6 +527,42 @@ else
+@@ -526,6 +527,46 @@ else
KBUILD_CFLAGS += -O2
endif
@@ -247,9 +247,13 @@ index 8f775f5..272691c 100644
+endif
+GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
++ifeq ($(KBUILD_EXTMOD),)
+gcc-plugins:
+ $(Q)$(MAKE) $(build)=tools/gcc
+else
++gcc-plugins: ;
++endif
++else
+gcc-plugins:
+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
@@ -263,7 +267,7 @@ index 8f775f5..272691c 100644
include $(srctree)/arch/$(SRCARCH)/Makefile
ifneq ($(CONFIG_FRAME_WARN),0)
-@@ -647,7 +684,7 @@ export mod_strip_cmd
+@@ -647,7 +688,7 @@ export mod_strip_cmd
ifeq ($(KBUILD_EXTMOD),)
@@ -272,7 +276,7 @@ index 8f775f5..272691c 100644
vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
$(core-y) $(core-m) $(drivers-y) $(drivers-m) \
-@@ -868,6 +905,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
+@@ -868,6 +909,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
# The actual objects are generated when descending,
# make sure no implicit rule kicks in
@@ -280,7 +284,7 @@ index 8f775f5..272691c 100644
$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
# Handle descending into subdirectories listed in $(vmlinux-dirs)
-@@ -877,7 +915,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
+@@ -877,7 +919,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
# Error messages still appears in the original language
PHONY += $(vmlinux-dirs)
@@ -289,7 +293,7 @@ index 8f775f5..272691c 100644
$(Q)$(MAKE) $(build)=$@
# Build the kernel release string
-@@ -986,6 +1024,7 @@ prepare0: archprepare FORCE
+@@ -986,6 +1028,7 @@ prepare0: archprepare FORCE
$(Q)$(MAKE) $(build)=. missing-syscalls
# All the preparing..
@@ -297,7 +301,7 @@ index 8f775f5..272691c 100644
prepare: prepare0
# The asm symlink changes when $(ARCH) changes.
-@@ -1127,6 +1166,7 @@ all: modules
+@@ -1127,6 +1170,7 @@ all: modules
# using awk while concatenating to the final file.
PHONY += modules
@@ -305,7 +309,7 @@ index 8f775f5..272691c 100644
modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
$(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
@$(kecho) ' Building modules, stage 2.';
-@@ -1136,7 +1176,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
+@@ -1136,7 +1180,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
# Target to prepare building external modules
PHONY += modules_prepare
@@ -314,7 +318,7 @@ index 8f775f5..272691c 100644
# Target to install modules
PHONY += modules_install
-@@ -1201,7 +1241,7 @@ MRPROPER_FILES += .config .config.old include/asm .version .old_version \
+@@ -1201,7 +1245,7 @@ MRPROPER_FILES += .config .config.old include/asm .version .old_version \
include/linux/autoconf.h include/linux/version.h \
include/linux/utsrelease.h \
include/linux/bounds.h include/asm*/asm-offsets.h \
@@ -323,7 +327,7 @@ index 8f775f5..272691c 100644
# clean - Delete most, but leave enough to build external modules
#
-@@ -1245,7 +1285,7 @@ distclean: mrproper
+@@ -1245,7 +1289,7 @@ distclean: mrproper
@find $(srctree) $(RCS_FIND_IGNORE) \
\( -name '*.orig' -o -name '*.rej' -o -name '*~' \
-o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
@@ -332,7 +336,7 @@ index 8f775f5..272691c 100644
-o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
-type f -print | xargs rm -f
-@@ -1292,6 +1332,7 @@ help:
+@@ -1292,6 +1336,7 @@ help:
@echo ' modules_prepare - Set up for building external modules'
@echo ' tags/TAGS - Generate tags file for editors'
@echo ' cscope - Generate cscope index'
@@ -340,7 +344,7 @@ index 8f775f5..272691c 100644
@echo ' kernelrelease - Output the release version string'
@echo ' kernelversion - Output the version stored in Makefile'
@echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
-@@ -1393,6 +1434,7 @@ PHONY += $(module-dirs) modules
+@@ -1393,6 +1438,7 @@ PHONY += $(module-dirs) modules
$(module-dirs): crmodverdir $(objtree)/Module.symvers
$(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
@@ -348,7 +352,7 @@ index 8f775f5..272691c 100644
modules: $(module-dirs)
@$(kecho) ' Building modules, stage 2.';
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
-@@ -1448,7 +1490,7 @@ endif # KBUILD_EXTMOD
+@@ -1448,7 +1494,7 @@ endif # KBUILD_EXTMOD
quiet_cmd_tags = GEN $@
cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
@@ -357,7 +361,7 @@ index 8f775f5..272691c 100644
$(call cmd,tags)
# Scripts to check various things for consistency
-@@ -1513,17 +1555,19 @@ else
+@@ -1513,17 +1559,19 @@ else
target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
endif
@@ -381,7 +385,7 @@ index 8f775f5..272691c 100644
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
%.symtypes: %.c prepare scripts FORCE
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-@@ -1533,11 +1577,13 @@ endif
+@@ -1533,11 +1581,13 @@ endif
$(cmd_crmodverdir)
$(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
$(build)=$(build-dir)
@@ -7844,7 +7848,7 @@ index 588a7aa..a3468b0 100644
if (err)
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
-index 4edd8eb..07ac7fd 100644
+index 4edd8eb..a558697 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -13,7 +13,9 @@
@@ -7857,7 +7861,7 @@ index 4edd8eb..07ac7fd 100644
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
-@@ -93,6 +95,30 @@ ENTRY(native_irq_enable_sysexit)
+@@ -93,6 +95,32 @@ ENTRY(native_irq_enable_sysexit)
ENDPROC(native_irq_enable_sysexit)
#endif
@@ -7874,7 +7878,9 @@ index 4edd8eb..07ac7fd 100644
+#endif
+#ifdef CONFIG_PAX_RANDKSTACK
+ pushq %rax
++ pushq %r11
+ call pax_randomize_kstack
++ popq %r11
+ popq %rax
+#endif
+ .endm
@@ -7888,7 +7894,7 @@ index 4edd8eb..07ac7fd 100644
/*
* 32bit SYSENTER instruction entry.
*
-@@ -119,12 +145,6 @@ ENTRY(ia32_sysenter_target)
+@@ -119,12 +147,6 @@ ENTRY(ia32_sysenter_target)
CFI_REGISTER rsp,rbp
SWAPGS_UNSAFE_STACK
movq PER_CPU_VAR(kernel_stack), %rsp
@@ -7901,7 +7907,7 @@ index 4edd8eb..07ac7fd 100644
movl %ebp,%ebp /* zero extension */
pushq $__USER32_DS
CFI_ADJUST_CFA_OFFSET 8
-@@ -135,28 +155,41 @@ ENTRY(ia32_sysenter_target)
+@@ -135,28 +157,41 @@ ENTRY(ia32_sysenter_target)
pushfq
CFI_ADJUST_CFA_OFFSET 8
/*CFI_REL_OFFSET rflags,0*/
@@ -7949,7 +7955,7 @@ index 4edd8eb..07ac7fd 100644
CFI_REMEMBER_STATE
jnz sysenter_tracesys
cmpq $(IA32_NR_syscalls-1),%rax
-@@ -166,13 +199,15 @@ sysenter_do_call:
+@@ -166,13 +201,15 @@ sysenter_do_call:
sysenter_dispatch:
call *ia32_sys_call_table(,%rax,8)
movq %rax,RAX-ARGOFFSET(%rsp)
@@ -7968,7 +7974,7 @@ index 4edd8eb..07ac7fd 100644
/* clear IF, that popfq doesn't enable interrupts early */
andl $~0x200,EFLAGS-R11(%rsp)
movl RIP-R11(%rsp),%edx /* User %eip */
-@@ -200,6 +235,9 @@ sysexit_from_sys_call:
+@@ -200,6 +237,9 @@ sysexit_from_sys_call:
movl %eax,%esi /* 2nd arg: syscall number */
movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
call audit_syscall_entry
@@ -7978,7 +7984,7 @@ index 4edd8eb..07ac7fd 100644
movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
cmpq $(IA32_NR_syscalls-1),%rax
ja ia32_badsys
-@@ -211,7 +249,7 @@ sysexit_from_sys_call:
+@@ -211,7 +251,7 @@ sysexit_from_sys_call:
.endm
.macro auditsys_exit exit
@@ -7987,7 +7993,7 @@ index 4edd8eb..07ac7fd 100644
jnz ia32_ret_from_sys_call
TRACE_IRQS_ON
sti
-@@ -221,12 +259,12 @@ sysexit_from_sys_call:
+@@ -221,12 +261,12 @@ sysexit_from_sys_call:
movzbl %al,%edi /* zero-extend that into %edi */
inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
call audit_syscall_exit
@@ -8002,7 +8008,7 @@ index 4edd8eb..07ac7fd 100644
jz \exit
CLEAR_RREGS -ARGOFFSET
jmp int_with_check
-@@ -244,7 +282,7 @@ sysexit_audit:
+@@ -244,7 +284,7 @@ sysexit_audit:
sysenter_tracesys:
#ifdef CONFIG_AUDITSYSCALL
@@ -8011,7 +8017,7 @@ index 4edd8eb..07ac7fd 100644
jz sysenter_auditsys
#endif
SAVE_REST
-@@ -252,6 +290,9 @@ sysenter_tracesys:
+@@ -252,6 +292,9 @@ sysenter_tracesys:
movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
movq %rsp,%rdi /* &pt_regs -> arg1 */
call syscall_trace_enter
@@ -8021,7 +8027,7 @@ index 4edd8eb..07ac7fd 100644
LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
RESTORE_REST
cmpq $(IA32_NR_syscalls-1),%rax
-@@ -283,19 +324,20 @@ ENDPROC(ia32_sysenter_target)
+@@ -283,19 +326,20 @@ ENDPROC(ia32_sysenter_target)
ENTRY(ia32_cstar_target)
CFI_STARTPROC32 simple
CFI_SIGNAL_FRAME
@@ -8044,7 +8050,7 @@ index 4edd8eb..07ac7fd 100644
movl %eax,%eax /* zero extension */
movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
movq %rcx,RIP-ARGOFFSET(%rsp)
-@@ -311,13 +353,19 @@ ENTRY(ia32_cstar_target)
+@@ -311,13 +355,19 @@ ENTRY(ia32_cstar_target)
/* no need to do an access_ok check here because r8 has been
32bit zero extended */
/* hardware stack frame is complete now */
@@ -8067,7 +8073,7 @@ index 4edd8eb..07ac7fd 100644
CFI_REMEMBER_STATE
jnz cstar_tracesys
cmpq $IA32_NR_syscalls-1,%rax
-@@ -327,13 +375,15 @@ cstar_do_call:
+@@ -327,13 +377,15 @@ cstar_do_call:
cstar_dispatch:
call *ia32_sys_call_table(,%rax,8)
movq %rax,RAX-ARGOFFSET(%rsp)
@@ -8086,7 +8092,7 @@ index 4edd8eb..07ac7fd 100644
RESTORE_ARGS 1,-ARG_SKIP,1,1,1
movl RIP-ARGOFFSET(%rsp),%ecx
CFI_REGISTER rip,rcx
-@@ -361,7 +411,7 @@ sysretl_audit:
+@@ -361,7 +413,7 @@ sysretl_audit:
cstar_tracesys:
#ifdef CONFIG_AUDITSYSCALL
@@ -8095,7 +8101,7 @@ index 4edd8eb..07ac7fd 100644
jz cstar_auditsys
#endif
xchgl %r9d,%ebp
-@@ -370,6 +420,9 @@ cstar_tracesys:
+@@ -370,6 +422,9 @@ cstar_tracesys:
movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
movq %rsp,%rdi /* &pt_regs -> arg1 */
call syscall_trace_enter
@@ -8105,7 +8111,7 @@ index 4edd8eb..07ac7fd 100644
LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
RESTORE_REST
xchgl %ebp,%r9d
-@@ -415,11 +468,6 @@ ENTRY(ia32_syscall)
+@@ -415,11 +470,6 @@ ENTRY(ia32_syscall)
CFI_REL_OFFSET rip,RIP-RIP
PARAVIRT_ADJUST_EXCEPTION_FRAME
SWAPGS
@@ -8117,7 +8123,7 @@ index 4edd8eb..07ac7fd 100644
movl %eax,%eax
pushq %rax
CFI_ADJUST_CFA_OFFSET 8
-@@ -427,9 +475,15 @@ ENTRY(ia32_syscall)
+@@ -427,9 +477,15 @@ ENTRY(ia32_syscall)
/* note the registers are not zero extended to the sf.
this could be a problem. */
SAVE_ARGS 0,0,1
@@ -8136,7 +8142,7 @@ index 4edd8eb..07ac7fd 100644
jnz ia32_tracesys
cmpq $(IA32_NR_syscalls-1),%rax
ja ia32_badsys
-@@ -448,6 +502,9 @@ ia32_tracesys:
+@@ -448,6 +504,9 @@ ia32_tracesys:
movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
movq %rsp,%rdi /* &pt_regs -> arg1 */
call syscall_trace_enter
@@ -8146,7 +8152,7 @@ index 4edd8eb..07ac7fd 100644
LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
RESTORE_REST
cmpq $(IA32_NR_syscalls-1),%rax
-@@ -462,6 +519,7 @@ ia32_badsys:
+@@ -462,6 +521,7 @@ ia32_badsys:
quiet_ni_syscall:
movq $-ENOSYS,%rax
@@ -8217,9 +8223,22 @@ index 016218c..47ccbdd 100644
set_fs(old_fs);
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
-index e2077d3..e134a5e 100644
+index e2077d3..b7a8919 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
+@@ -8,10 +8,10 @@
+
+ #ifdef CONFIG_SMP
+ .macro LOCK_PREFIX
+-1: lock
++672: lock
+ .section .smp_locks,"a"
+ .align 4
+- X86_ALIGN 1b
++ X86_ALIGN 672b
+ .previous
+ .endm
+ #else
@@ -19,4 +19,43 @@
.endm
#endif
@@ -8313,7 +8332,7 @@ index 20370c6..a2eb9b0 100644
"popl %%ebp\n\t"
"popl %%edi\n\t"
diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
-index dc5a667..fbed878 100644
+index dc5a667..939040c 100644
--- a/arch/x86/include/asm/atomic_32.h
+++ b/arch/x86/include/asm/atomic_32.h
@@ -25,6 +25,17 @@ static inline int atomic_read(const atomic_t *v)
@@ -8564,10 +8583,11 @@ index dc5a667..fbed878 100644
: "+m" (v->counter), "=qm" (c)
: "ir" (i) : "memory");
return c;
-@@ -179,6 +341,46 @@ static inline int atomic_add_return(int i, atomic_t *v)
+@@ -179,7 +341,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
#endif
/* Modern 486+ processor */
__i = i;
+- asm volatile(LOCK_PREFIX "xaddl %0, %1"
+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
@@ -8577,21 +8597,13 @@ index dc5a667..fbed878 100644
+ _ASM_EXTABLE(0b, 0b)
+#endif
+
-+ : "+r" (i), "+m" (v->counter)
-+ : : "memory");
-+ return i + __i;
-+
-+#ifdef CONFIG_M386
-+no_xadd: /* Legacy 386 processor */
-+ local_irq_save(flags);
-+ __i = atomic_read(v);
-+ atomic_set(v, i + __i);
-+ local_irq_restore(flags);
-+ return i + __i;
-+#endif
-+}
-+
-+/**
+ : "+r" (i), "+m" (v->counter)
+ : : "memory");
+ return i + __i;
+@@ -195,6 +365,38 @@ no_xadd: /* Legacy 386 processor */
+ }
+
+ /**
+ * atomic_add_return_unchecked - add integer and return
+ * @v: pointer of type atomic_unchecked_t
+ * @i: integer value to add
@@ -8608,9 +8620,25 @@ index dc5a667..fbed878 100644
+#endif
+ /* Modern 486+ processor */
+ __i = i;
- asm volatile(LOCK_PREFIX "xaddl %0, %1"
- : "+r" (i), "+m" (v->counter)
- : : "memory");
++ asm volatile(LOCK_PREFIX "xaddl %0, %1"
++ : "+r" (i), "+m" (v->counter)
++ : : "memory");
++ return i + __i;
++
++#ifdef CONFIG_M386
++no_xadd: /* Legacy 386 processor */
++ local_irq_save(flags);
++ __i = atomic_read_unchecked(v);
++ atomic_set_unchecked(v, i + __i);
++ local_irq_restore(flags);
++ return i + __i;
++#endif
++}
++
++/**
+ * atomic_sub_return - subtract integer and return
+ * @v: pointer of type atomic_t
+ * @i: integer value to subtract
@@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return cmpxchg(&v->counter, old, new);
}
@@ -13594,7 +13622,7 @@ index 6e082dc..a0b5f36 100644
if (c->x86_model == 3 && c->x86_mask == 0)
size = 64;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index 4e34d10..a53b130a 100644
+index 4e34d10..ba6bc97 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
@@ -13671,7 +13699,7 @@ index 4e34d10..a53b130a 100644
/* Filter out anything that depends on CPUID levels we don't have */
filter_cpuid_features(c, true);
-+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
+ setup_clear_cpu_cap(X86_FEATURE_SEP);
+#endif
+
@@ -15590,7 +15618,7 @@ index c097e7d..c689cf4 100644
/*
* End of kprobes section
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
-index 34a56a9..a98c643 100644
+index 34a56a9..4aa5c8b 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -53,6 +53,8 @@
@@ -15774,9 +15802,9 @@ index 34a56a9..a98c643 100644
+ call pax_exit_kernel_user
+#endif
+#ifdef CONFIG_PAX_RANDKSTACK
-+ push %rax
++ pushq %rax
+ call pax_randomize_kstack
-+ pop %rax
++ popq %rax
+#endif
+ .endm
+
@@ -23776,7 +23804,7 @@ index 61b41ca..5fef66a 100644
extern u32 pnp_bios_is_utter_crap;
pnp_bios_is_utter_crap = 1;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
-index 8ac0d76..3f191dc 100644
+index 8ac0d76..87899a4 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -11,10 +11,19 @@
@@ -24253,7 +24281,7 @@ index 8ac0d76..3f191dc 100644
if (unlikely(expand_stack(vma, address))) {
bad_area(regs, error_code, address);
return;
-@@ -1146,3 +1390,240 @@ good_area:
+@@ -1146,3 +1390,292 @@ good_area:
up_read(&mm->mmap_sem);
}
@@ -24294,6 +24322,30 @@ index 8ac0d76..3f191dc 100644
+{
+ int err;
+
++ do { /* PaX: libffi trampoline emulation */
++ unsigned char mov, jmp;
++ unsigned int addr1, addr2;
++
++#ifdef CONFIG_X86_64
++ if ((regs->ip + 9) >> 32)
++ break;
++#endif
++
++ err = get_user(mov, (unsigned char __user *)regs->ip);
++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
++ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
++ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
++
++ if (err)
++ break;
++
++ if (mov == 0xB8 && jmp == 0xE9) {
++ regs->ax = addr1;
++ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
++ return 2;
++ }
++ } while (0);
++
+ do { /* PaX: gcc trampoline emulation #1 */
+ unsigned char mov1, mov2;
+ unsigned short jmp;
@@ -24353,6 +24405,34 @@ index 8ac0d76..3f191dc 100644
+{
+ int err;
+
++ do { /* PaX: libffi trampoline emulation */
++ unsigned short mov1, mov2, jmp1;
++ unsigned char stcclc, jmp2;
++ unsigned long addr1, addr2;
++
++ err = get_user(mov1, (unsigned short __user *)regs->ip);
++ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
++ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
++ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
++ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
++ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
++ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
++
++ if (err)
++ break;
++
++ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
++ regs->r11 = addr1;
++ regs->r10 = addr2;
++ if (stcclc == 0xF8)
++ regs->flags &= ~X86_EFLAGS_CF;
++ else
++ regs->flags |= X86_EFLAGS_CF;
++ regs->ip = addr1;
++ return 2;
++ }
++ } while (0);
++
+ do { /* PaX: gcc trampoline emulation #1 */
+ unsigned short mov1, mov2, jmp1;
+ unsigned char jmp2;
@@ -46157,10 +46237,18 @@ index 0133b5a..b3baa9f 100644
fd_offset + ex.a_text);
up_write(&current->mm->mmap_sem);
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
-index 1ed37ba..32cc555 100644
+index 1ed37ba..b9c035f 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
-@@ -50,6 +50,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
+@@ -31,6 +31,7 @@
+ #include <linux/random.h>
+ #include <linux/elf.h>
+ #include <linux/utsname.h>
++#include <linux/xattr.h>
+ #include <asm/uaccess.h>
+ #include <asm/param.h>
+ #include <asm/page.h>
+@@ -50,6 +51,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
#define elf_core_dump NULL
#endif
@@ -46171,7 +46259,7 @@ index 1ed37ba..32cc555 100644
#if ELF_EXEC_PAGESIZE > PAGE_SIZE
#define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
#else
-@@ -69,6 +73,11 @@ static struct linux_binfmt elf_format = {
+@@ -69,6 +74,11 @@ static struct linux_binfmt elf_format = {
.load_binary = load_elf_binary,
.load_shlib = load_elf_library,
.core_dump = elf_core_dump,
@@ -46183,7 +46271,7 @@ index 1ed37ba..32cc555 100644
.min_coredump = ELF_EXEC_PAGESIZE,
.hasvdso = 1
};
-@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format = {
+@@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
static int set_brk(unsigned long start, unsigned long end)
{
@@ -46192,7 +46280,7 @@ index 1ed37ba..32cc555 100644
start = ELF_PAGEALIGN(start);
end = ELF_PAGEALIGN(end);
if (end > start) {
-@@ -87,7 +98,7 @@ static int set_brk(unsigned long start, unsigned long end)
+@@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
if (BAD_ADDR(addr))
return addr;
}
@@ -46201,7 +46289,7 @@ index 1ed37ba..32cc555 100644
return 0;
}
-@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
+@@ -148,12 +160,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
elf_addr_t __user *u_rand_bytes;
const char *k_platform = ELF_PLATFORM;
const char *k_base_platform = ELF_BASE_PLATFORM;
@@ -46218,7 +46306,7 @@ index 1ed37ba..32cc555 100644
/*
* In some cases (e.g. Hyper-Threading), we want to avoid L1
-@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
+@@ -195,8 +210,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
* Generate 16 random bytes for userspace PRNG seeding.
*/
get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
@@ -46233,7 +46321,7 @@ index 1ed37ba..32cc555 100644
if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
return -EFAULT;
-@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
+@@ -308,9 +327,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
return -EFAULT;
current->mm->env_end = p;
@@ -46246,7 +46334,7 @@ index 1ed37ba..32cc555 100644
return -EFAULT;
return 0;
}
-@@ -385,10 +405,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+@@ -385,10 +406,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
{
struct elf_phdr *elf_phdata;
struct elf_phdr *eppnt;
@@ -46259,7 +46347,7 @@ index 1ed37ba..32cc555 100644
unsigned long total_size;
int retval, i, size;
-@@ -434,6 +454,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+@@ -434,6 +455,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
goto out_close;
}
@@ -46271,7 +46359,7 @@ index 1ed37ba..32cc555 100644
eppnt = elf_phdata;
for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
if (eppnt->p_type == PT_LOAD) {
-@@ -477,8 +502,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+@@ -477,8 +503,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
k = load_addr + eppnt->p_vaddr;
if (BAD_ADDR(k) ||
eppnt->p_filesz > eppnt->p_memsz ||
@@ -46282,15 +46370,16 @@ index 1ed37ba..32cc555 100644
error = -ENOMEM;
goto out_close;
}
-@@ -532,6 +557,194 @@ out:
+@@ -532,6 +558,348 @@ out:
return error;
}
-+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
-+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
++static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
+{
+ unsigned long pax_flags = 0UL;
+
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++
+#ifdef CONFIG_PAX_PAGEEXEC
+ if (elf_phdata->p_flags & PF_PAGEEXEC)
+ pax_flags |= MF_PAX_PAGEEXEC;
@@ -46325,15 +46414,17 @@ index 1ed37ba..32cc555 100644
+ pax_flags |= MF_PAX_RANDMMAP;
+#endif
+
++#endif
++
+ return pax_flags;
+}
-+#endif
+
-+#ifdef CONFIG_PAX_PT_PAX_FLAGS
-+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
++static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
+{
+ unsigned long pax_flags = 0UL;
+
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++
+#ifdef CONFIG_PAX_PAGEEXEC
+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
+ pax_flags |= MF_PAX_PAGEEXEC;
@@ -46368,15 +46459,17 @@ index 1ed37ba..32cc555 100644
+ pax_flags |= MF_PAX_RANDMMAP;
+#endif
+
++#endif
++
+ return pax_flags;
+}
-+#endif
+
-+#ifdef CONFIG_PAX_EI_PAX
+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
+{
+ unsigned long pax_flags = 0UL;
+
++#ifdef CONFIG_PAX_EI_PAX
++
+#ifdef CONFIG_PAX_PAGEEXEC
+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
+ pax_flags |= MF_PAX_PAGEEXEC;
@@ -46411,25 +46504,38 @@ index 1ed37ba..32cc555 100644
+ pax_flags |= MF_PAX_RANDMMAP;
+#endif
+
-+ return pax_flags;
-+}
++#else
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ pax_flags |= MF_PAX_PAGEEXEC;
+#endif
+
-+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
-+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
-+{
-+ unsigned long pax_flags = 0UL;
++#ifdef CONFIG_PAX_MPROTECT
++ pax_flags |= MF_PAX_MPROTECT;
++#endif
+
-+#ifdef CONFIG_PAX_PT_PAX_FLAGS
-+ unsigned long i;
-+ int found_flags = 0;
++#ifdef CONFIG_PAX_RANDMMAP
++ pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!(__supported_pte_mask & _PAGE_NX)) {
++ pax_flags &= ~MF_PAX_PAGEEXEC;
++ pax_flags |= MF_PAX_SEGMEXEC;
++ }
+#endif
+
-+#ifdef CONFIG_PAX_EI_PAX
-+ pax_flags = pax_parse_ei_pax(elf_ex);
+#endif
+
++ return pax_flags;
++}
++
++static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
++{
++
+#ifdef CONFIG_PAX_PT_PAX_FLAGS
++ unsigned long i;
++
+ for (i = 0UL; i < elf_ex->e_phnum; i++)
+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
@@ -46437,34 +46543,170 @@ index 1ed37ba..32cc555 100644
+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
-+ return -EINVAL;
++ return ~0UL;
+
+#ifdef CONFIG_PAX_SOFTMODE
+ if (pax_softmode)
-+ pax_flags = pax_parse_softmode(&elf_phdata[i]);
++ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
+ else
+#endif
+
-+ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
-+ found_flags = 1;
++ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
+ break;
+ }
+#endif
+
-+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
-+ if (found_flags == 0) {
-+ struct elf_phdr phdr;
-+ memset(&phdr, 0, sizeof(phdr));
-+ phdr.p_flags = PF_NOEMUTRAMP;
++ return ~0UL;
++}
++
++static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
++ pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
++ pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ if ((__supported_pte_mask & _PAGE_NX))
++ pax_flags &= ~MF_PAX_SEGMEXEC;
++ else
++ pax_flags &= ~MF_PAX_PAGEEXEC;
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
++ pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (pax_flags_softmode & MF_PAX_MPROTECT)
++ pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
++ pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++ return pax_flags;
++}
++
++static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
++ pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
++ pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ if ((__supported_pte_mask & _PAGE_NX))
++ pax_flags &= ~MF_PAX_SEGMEXEC;
++ else
++ pax_flags &= ~MF_PAX_PAGEEXEC;
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
++ pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
++ pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
++ pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++ return pax_flags;
++}
++
++static unsigned long pax_parse_xattr_pax(struct file * const file)
++{
++
++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
++ ssize_t xattr_size, i;
++ unsigned char xattr_value[5];
++ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
++
++ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
++ if (xattr_size <= 0)
++ return ~0UL;
++
++ for (i = 0; i < xattr_size; i++)
++ switch (xattr_value[i]) {
++ default:
++ return ~0UL;
++
++#define parse_flag(option1, option2, flag) \
++ case option1: \
++ pax_flags_hardmode |= MF_PAX_##flag; \
++ break; \
++ case option2: \
++ pax_flags_softmode |= MF_PAX_##flag; \
++ break;
++
++ parse_flag('p', 'P', PAGEEXEC);
++ parse_flag('e', 'E', EMUTRAMP);
++ parse_flag('m', 'M', MPROTECT);
++ parse_flag('r', 'R', RANDMMAP);
++ parse_flag('s', 'S', SEGMEXEC);
++
++#undef parse_flag
++ }
++
++ if (pax_flags_hardmode & pax_flags_softmode)
++ return ~0UL;
++
+#ifdef CONFIG_PAX_SOFTMODE
-+ if (pax_softmode)
-+ pax_flags = pax_parse_softmode(&phdr);
++ if (pax_softmode)
++ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
+ else
+#endif
-+ pax_flags = pax_parse_hardmode(&phdr);
-+ }
++
++ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
++#else
++ return ~0UL;
+#endif
++}
+
++#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
++static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
++{
++ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
++
++ pax_flags = pax_parse_ei_pax(elf_ex);
++ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
++ xattr_pax_flags = pax_parse_xattr_pax(file);
++
++ if (pt_pax_flags == ~0UL)
++ pt_pax_flags = xattr_pax_flags;
++ else if (xattr_pax_flags == ~0UL)
++ xattr_pax_flags = pt_pax_flags;
++ if (pt_pax_flags != xattr_pax_flags)
++ return -EINVAL;
++ if (pt_pax_flags != ~0UL)
++ pax_flags = pt_pax_flags;
+
+ if (0 > pax_check_flags(&pax_flags))
+ return -EINVAL;
@@ -46477,7 +46719,7 @@ index 1ed37ba..32cc555 100644
/*
* These are the functions used to load ELF style executables and shared
* libraries. There is no binary dependent code anywhere else.
-@@ -548,6 +761,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
+@@ -548,6 +916,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
{
unsigned int random_variable = 0;
@@ -46489,7 +46731,7 @@ index 1ed37ba..32cc555 100644
if ((current->flags & PF_RANDOMIZE) &&
!(current->personality & ADDR_NO_RANDOMIZE)) {
random_variable = get_random_int() & STACK_RND_MASK;
-@@ -566,7 +784,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -566,7 +939,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
unsigned long load_addr = 0, load_bias = 0;
int load_addr_set = 0;
char * elf_interpreter = NULL;
@@ -46498,7 +46740,7 @@ index 1ed37ba..32cc555 100644
struct elf_phdr *elf_ppnt, *elf_phdata;
unsigned long elf_bss, elf_brk;
int retval, i;
-@@ -576,11 +794,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -576,11 +949,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
unsigned long start_code, end_code, start_data, end_data;
unsigned long reloc_func_desc = 0;
int executable_stack = EXSTACK_DEFAULT;
@@ -46511,7 +46753,7 @@ index 1ed37ba..32cc555 100644
loc = kmalloc(sizeof(*loc), GFP_KERNEL);
if (!loc) {
-@@ -718,11 +936,80 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -718,11 +1091,80 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
/* OK, This is the point of no return */
current->flags &= ~PF_FORKNOEXEC;
@@ -46536,8 +46778,8 @@ index 1ed37ba..32cc555 100644
+
+ current->mm->def_flags = 0;
+
-+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
-+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
++#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
++ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
+ send_sig(SIGKILL, current, 0);
+ goto out_free_dentry;
+ }
@@ -46593,7 +46835,7 @@ index 1ed37ba..32cc555 100644
if (elf_read_implies_exec(loc->elf_ex, executable_stack))
current->personality |= READ_IMPLIES_EXEC;
-@@ -800,10 +1087,27 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -800,10 +1242,27 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
* might try to exec. This is because the brk will
* follow the loader, and is not movable. */
#ifdef CONFIG_X86
@@ -46622,7 +46864,7 @@ index 1ed37ba..32cc555 100644
}
error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
-@@ -836,9 +1140,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -836,9 +1295,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
* allowed task size. Note that p_filesz must always be
* <= p_memsz so it is only necessary to check p_memsz.
*/
@@ -46635,7 +46877,7 @@ index 1ed37ba..32cc555 100644
/* set_brk can never work. Avoid overflows. */
send_sig(SIGKILL, current, 0);
retval = -EINVAL;
-@@ -866,6 +1170,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -866,6 +1325,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
start_data += load_bias;
end_data += load_bias;
@@ -46647,7 +46889,7 @@ index 1ed37ba..32cc555 100644
/* Calling set_brk effectively mmaps the pages that we need
* for the bss and break sections. We must do this before
* mapping in the interpreter, to make sure it doesn't wind
-@@ -877,9 +1186,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -877,9 +1341,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
goto out_free_dentry;
}
if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
@@ -46662,7 +46904,7 @@ index 1ed37ba..32cc555 100644
}
if (elf_interpreter) {
-@@ -1112,8 +1423,10 @@ static int dump_seek(struct file *file, loff_t off)
+@@ -1112,8 +1578,10 @@ static int dump_seek(struct file *file, loff_t off)
unsigned long n = off;
if (n > PAGE_SIZE)
n = PAGE_SIZE;
@@ -46674,7 +46916,7 @@ index 1ed37ba..32cc555 100644
off -= n;
}
free_page((unsigned long)buf);
-@@ -1125,7 +1438,7 @@ static int dump_seek(struct file *file, loff_t off)
+@@ -1125,7 +1593,7 @@ static int dump_seek(struct file *file, loff_t off)
* Decide what to dump of a segment, part, all or none.
*/
static unsigned long vma_dump_size(struct vm_area_struct *vma,
@@ -46683,7 +46925,7 @@ index 1ed37ba..32cc555 100644
{
#define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
-@@ -1159,7 +1472,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
+@@ -1159,7 +1627,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
if (vma->vm_file == NULL)
return 0;
@@ -46692,7 +46934,7 @@ index 1ed37ba..32cc555 100644
goto whole;
/*
-@@ -1255,8 +1568,11 @@ static int writenote(struct memelfnote *men, struct file *file,
+@@ -1255,8 +1723,11 @@ static int writenote(struct memelfnote *men, struct file *file,
#undef DUMP_WRITE
#define DUMP_WRITE(addr, nr) \
@@ -46705,7 +46947,7 @@ index 1ed37ba..32cc555 100644
static void fill_elf_header(struct elfhdr *elf, int segs,
u16 machine, u32 flags, u8 osabi)
-@@ -1385,9 +1701,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
+@@ -1385,9 +1856,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
{
elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
int i = 0;
@@ -46717,7 +46959,7 @@ index 1ed37ba..32cc555 100644
fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
}
-@@ -1973,7 +2289,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
+@@ -1973,7 +2444,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
phdr.p_offset = offset;
phdr.p_vaddr = vma->vm_start;
phdr.p_paddr = 0;
@@ -46726,7 +46968,7 @@ index 1ed37ba..32cc555 100644
phdr.p_memsz = vma->vm_end - vma->vm_start;
offset += phdr.p_filesz;
phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
-@@ -2006,7 +2322,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
+@@ -2006,7 +2477,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
unsigned long addr;
unsigned long end;
@@ -46735,7 +46977,7 @@ index 1ed37ba..32cc555 100644
for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
struct page *page;
-@@ -2015,6 +2331,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
+@@ -2015,6 +2486,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
page = get_dump_page(addr);
if (page) {
void *kaddr = kmap(page);
@@ -46743,7 +46985,7 @@ index 1ed37ba..32cc555 100644
stop = ((size += PAGE_SIZE) > limit) ||
!dump_write(file, kaddr, PAGE_SIZE);
kunmap(page);
-@@ -2042,6 +2359,97 @@ out:
+@@ -2042,6 +2514,97 @@ out:
#endif /* USE_ELF_CORE_DUMP */
@@ -47998,10 +48240,10 @@ index 88ba4d4..073f003 100644
if (rc < 0)
goto out_free;
diff --git a/fs/exec.c b/fs/exec.c
-index 86fafc6..b307bfa 100644
+index 86fafc6..6d33cbb 100644
--- a/fs/exec.c
+++ b/fs/exec.c
-@@ -56,12 +56,24 @@
+@@ -56,12 +56,28 @@
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
@@ -48018,6 +48260,10 @@ index 86fafc6..b307bfa 100644
#include <asm/tlb.h>
#include "internal.h"
++#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
++void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
++#endif
++
+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
+EXPORT_SYMBOL(pax_set_initial_flags_func);
@@ -48026,7 +48272,7 @@ index 86fafc6..b307bfa 100644
int core_uses_pid;
char core_pattern[CORENAME_MAX_SIZE] = "core";
unsigned int core_pipe_limit;
-@@ -178,18 +190,10 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
+@@ -178,18 +194,10 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
int write)
{
struct page *page;
@@ -48048,7 +48294,7 @@ index 86fafc6..b307bfa 100644
return NULL;
if (write) {
-@@ -263,6 +267,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
+@@ -263,6 +271,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
vma->vm_end = STACK_TOP_MAX;
vma->vm_start = vma->vm_end - PAGE_SIZE;
vma->vm_flags = VM_STACK_FLAGS;
@@ -48060,7 +48306,7 @@ index 86fafc6..b307bfa 100644
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
-@@ -276,6 +285,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
+@@ -276,6 +289,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
mm->stack_vm = mm->total_vm = 1;
up_write(&mm->mmap_sem);
bprm->p = vma->vm_end - sizeof(void *);
@@ -48073,7 +48319,7 @@ index 86fafc6..b307bfa 100644
return 0;
err:
up_write(&mm->mmap_sem);
-@@ -510,7 +525,7 @@ int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
+@@ -510,7 +529,7 @@ int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
int r;
mm_segment_t oldfs = get_fs();
set_fs(KERNEL_DS);
@@ -48082,7 +48328,7 @@ index 86fafc6..b307bfa 100644
set_fs(oldfs);
return r;
}
-@@ -540,7 +555,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
+@@ -540,7 +559,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
unsigned long new_end = old_end - shift;
struct mmu_gather *tlb;
@@ -48092,7 +48338,7 @@ index 86fafc6..b307bfa 100644
/*
* ensure there are no vmas between where we want to go
-@@ -549,6 +565,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
+@@ -549,6 +569,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
if (vma != find_vma(mm, new_start))
return -EFAULT;
@@ -48103,7 +48349,7 @@ index 86fafc6..b307bfa 100644
/*
* cover the whole range: [new_start, old_end)
*/
-@@ -630,10 +650,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
+@@ -630,10 +654,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
stack_top = arch_align_stack(stack_top);
stack_top = PAGE_ALIGN(stack_top);
@@ -48114,7 +48360,7 @@ index 86fafc6..b307bfa 100644
stack_shift = vma->vm_end - stack_top;
bprm->p -= stack_shift;
-@@ -645,6 +661,14 @@ int setup_arg_pages(struct linux_binprm *bprm,
+@@ -645,6 +665,14 @@ int setup_arg_pages(struct linux_binprm *bprm,
bprm->exec -= stack_shift;
down_write(&mm->mmap_sem);
@@ -48129,7 +48375,7 @@ index 86fafc6..b307bfa 100644
vm_flags = VM_STACK_FLAGS;
/*
-@@ -658,19 +682,24 @@ int setup_arg_pages(struct linux_binprm *bprm,
+@@ -658,19 +686,24 @@ int setup_arg_pages(struct linux_binprm *bprm,
vm_flags &= ~VM_EXEC;
vm_flags |= mm->def_flags;
@@ -48161,7 +48407,7 @@ index 86fafc6..b307bfa 100644
stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
stack_size = vma->vm_end - vma->vm_start;
/*
-@@ -744,7 +773,7 @@ int kernel_read(struct file *file, loff_t offset,
+@@ -744,7 +777,7 @@ int kernel_read(struct file *file, loff_t offset,
old_fs = get_fs();
set_fs(get_ds());
/* The cast to a user pointer is valid due to the set_fs() */
@@ -48170,7 +48416,7 @@ index 86fafc6..b307bfa 100644
set_fs(old_fs);
return result;
}
-@@ -1152,7 +1181,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
+@@ -1152,7 +1185,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
}
rcu_read_unlock();
@@ -48179,7 +48425,7 @@ index 86fafc6..b307bfa 100644
bprm->unsafe |= LSM_UNSAFE_SHARE;
} else {
res = -EAGAIN;
-@@ -1347,11 +1376,35 @@ int do_execve(char * filename,
+@@ -1347,11 +1380,35 @@ int do_execve(char * filename,
char __user *__user *envp,
struct pt_regs * regs)
{
@@ -48215,7 +48461,7 @@ index 86fafc6..b307bfa 100644
retval = unshare_files(&displaced);
if (retval)
-@@ -1383,6 +1436,16 @@ int do_execve(char * filename,
+@@ -1383,6 +1440,16 @@ int do_execve(char * filename,
bprm->filename = filename;
bprm->interp = filename;
@@ -48232,7 +48478,7 @@ index 86fafc6..b307bfa 100644
retval = bprm_mm_init(bprm);
if (retval)
goto out_file;
-@@ -1412,10 +1475,41 @@ int do_execve(char * filename,
+@@ -1412,10 +1479,41 @@ int do_execve(char * filename,
if (retval < 0)
goto out;
@@ -48275,7 +48521,7 @@ index 86fafc6..b307bfa 100644
/* execve succeeded */
current->fs->in_exec = 0;
-@@ -1426,6 +1520,14 @@ int do_execve(char * filename,
+@@ -1426,6 +1524,14 @@ int do_execve(char * filename,
put_files_struct(displaced);
return retval;
@@ -48290,7 +48536,7 @@ index 86fafc6..b307bfa 100644
out:
if (bprm->mm) {
acct_arg_size(bprm, 0);
-@@ -1591,6 +1693,220 @@ out:
+@@ -1591,6 +1697,220 @@ out:
return ispipe;
}
@@ -48511,7 +48757,7 @@ index 86fafc6..b307bfa 100644
static int zap_process(struct task_struct *start)
{
struct task_struct *t;
-@@ -1793,17 +2109,17 @@ static void wait_for_dump_helpers(struct file *file)
+@@ -1793,17 +2113,17 @@ static void wait_for_dump_helpers(struct file *file)
pipe = file->f_path.dentry->d_inode->i_pipe;
pipe_lock(pipe);
@@ -48534,7 +48780,7 @@ index 86fafc6..b307bfa 100644
pipe_unlock(pipe);
}
-@@ -1826,10 +2142,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+@@ -1826,10 +2146,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
char **helper_argv = NULL;
int helper_argc = 0;
int dump_count = 0;
@@ -48549,7 +48795,7 @@ index 86fafc6..b307bfa 100644
binfmt = mm->binfmt;
if (!binfmt || !binfmt->core_dump)
goto fail;
-@@ -1874,6 +2193,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+@@ -1874,6 +2197,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
*/
clear_thread_flag(TIF_SIGPENDING);
@@ -48558,7 +48804,7 @@ index 86fafc6..b307bfa 100644
/*
* lock_kernel() because format_corename() is controlled by sysctl, which
* uses lock_kernel()
-@@ -1908,7 +2229,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+@@ -1908,7 +2233,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
goto fail_unlock;
}
@@ -48567,7 +48813,7 @@ index 86fafc6..b307bfa 100644
if (core_pipe_limit && (core_pipe_limit < dump_count)) {
printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
task_tgid_vnr(current), current->comm);
-@@ -1972,7 +2293,7 @@ close_fail:
+@@ -1972,7 +2297,7 @@ close_fail:
filp_close(file, NULL);
fail_dropcount:
if (dump_count)
@@ -65810,6 +66056,18 @@ index 7be0c6f..2f63a2b 100644
op->release = release;
INIT_LIST_HEAD(&op->pend_link);
fscache_set_op_state(op, "Init");
+diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
+index 4d6f47b..00bcedb 100644
+--- a/include/linux/fsnotify_backend.h
++++ b/include/linux/fsnotify_backend.h
+@@ -86,6 +86,7 @@ struct fsnotify_ops {
+ void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group);
+ void (*free_event_priv)(struct fsnotify_event_private_data *priv);
+ };
++typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
+
+ /*
+ * A group is a "thing" that wants to receive notification about filesystem
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 4ec5e67..42f1eb9 100644
--- a/include/linux/ftrace_event.h
@@ -67381,7 +67639,7 @@ index 11e5be6..1ff2423 100644
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
-index 9d12ed5..8023125 100644
+index 9d12ed5..6d9707a 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -186,6 +186,8 @@ struct vm_area_struct {
@@ -67398,7 +67656,7 @@ index 9d12ed5..8023125 100644
struct mmu_notifier_mm *mmu_notifier_mm;
#endif
+
-+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
+ unsigned long pax_flags;
+#endif
+
@@ -69201,6 +69459,22 @@ index 13070d6..aa4159a 100644
}
static inline void __dec_zone_page_state(struct page *page,
+diff --git a/include/linux/xattr.h b/include/linux/xattr.h
+index 5c84af8..1a3b6e2 100644
+--- a/include/linux/xattr.h
++++ b/include/linux/xattr.h
+@@ -33,6 +33,11 @@
+ #define XATTR_USER_PREFIX "user."
+ #define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1)
+
++/* User namespace */
++#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
++#define XATTR_PAX_FLAGS_SUFFIX "flags"
++#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
++
+ struct inode;
+ struct dentry;
+
diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
index eed5fcc..5080d24 100644
--- a/include/media/saa7146_vv.h
@@ -75480,7 +75754,7 @@ index 67a33a5..094dcf1 100644
if (!bdi_cap_writeback_dirty(bdi))
return;
diff --git a/mm/filemap.c b/mm/filemap.c
-index 9e0826e..4ee8f13 100644
+index a1fe378..e26702f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
@@ -75492,7 +75766,7 @@ index 9e0826e..4ee8f13 100644
file_accessed(file);
vma->vm_ops = &generic_file_vm_ops;
vma->vm_flags |= VM_CAN_NONLINEAR;
-@@ -2027,6 +2027,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
+@@ -2024,6 +2024,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
*pos = i_size_read(inode);
if (limit != RLIM_INFINITY) {
@@ -75555,10 +75829,18 @@ index 9c1e627..5ca9447 100644
set_page_address(page, (void *)vaddr);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
-index 5e1e508..9f0ebad 100644
+index 5e1e508..ac70275 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
-@@ -1933,6 +1933,26 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -869,6 +869,7 @@ free:
+ list_del(&page->lru);
+ enqueue_huge_page(h, page);
+ }
++ spin_unlock(&hugetlb_lock);
+
+ /* Free unnecessary surplus pages to the buddy allocator */
+ if (!list_empty(&surplus_list)) {
+@@ -1933,6 +1934,26 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
return 1;
}
@@ -75585,7 +75867,7 @@ index 5e1e508..9f0ebad 100644
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *ptep, pte_t pte,
struct page *pagecache_page)
-@@ -2004,6 +2024,11 @@ retry_avoidcopy:
+@@ -2004,6 +2025,11 @@ retry_avoidcopy:
huge_ptep_clear_flush(vma, address, ptep);
set_huge_pte_at(mm, address, ptep,
make_huge_pte(vma, new_page, 1));
@@ -75597,7 +75879,7 @@ index 5e1e508..9f0ebad 100644
/* Make the old page be freed below */
new_page = old_page;
}
-@@ -2135,6 +2160,10 @@ retry:
+@@ -2135,6 +2161,10 @@ retry:
&& (vma->vm_flags & VM_SHARED)));
set_huge_pte_at(mm, address, ptep, new_pte);
@@ -75608,7 +75890,7 @@ index 5e1e508..9f0ebad 100644
if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
/* Optimization, do the COW without a second fault */
ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
-@@ -2163,6 +2192,28 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2163,6 +2193,28 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
static DEFINE_MUTEX(hugetlb_instantiation_mutex);
struct hstate *h = hstate_vma(vma);
@@ -83550,10 +83832,10 @@ index d52f7a0..269eb1b 100755
rm -f tags
xtags ctags
diff --git a/security/Kconfig b/security/Kconfig
-index fb363cd..14c747b 100644
+index fb363cd..0524cf3 100644
--- a/security/Kconfig
+++ b/security/Kconfig
-@@ -4,6 +4,587 @@
+@@ -4,6 +4,625 @@
menu "Security options"
@@ -83593,12 +83875,11 @@ index fb363cd..14c747b 100644
+
+config PAX_SOFTMODE
+ bool 'Support soft mode'
-+ select PAX_PT_PAX_FLAGS
+ help
+ Enabling this option will allow you to run PaX in soft mode, that
+ is, PaX features will not be enforced by default, only on executables
-+ marked explicitly. You must also enable PT_PAX_FLAGS support as it
-+ is the only way to mark executables for soft mode use.
++ marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
++ support as they are the only way to mark executables for soft mode use.
+
+ Soft mode can be activated by using the "pax_softmode=1" kernel command
+ line option on boot. Furthermore you can control various PaX features
@@ -83613,10 +83894,15 @@ index fb363cd..14c747b 100644
+ an otherwise reserved part of the ELF header. This marking has
+ numerous drawbacks (no support for soft-mode, toolchain does not
+ know about the non-standard use of the ELF header) therefore it
-+ has been deprecated in favour of PT_PAX_FLAGS support.
++ has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
++ support.
+
-+ Note that if you enable PT_PAX_FLAGS marking support as well,
-+ the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
++ If you have applications not marked by the PT_PAX_FLAGS ELF program
++ header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
++ option otherwise they will not get any protection.
++
++ Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
++ support as well, they will override the legacy EI_PAX marks.
+
+config PAX_PT_PAX_FLAGS
+ bool 'Use ELF program header marking'
@@ -83629,12 +83915,47 @@ index fb363cd..14c747b 100644
+ integrated into the toolchain (the binutils patch is available
+ from http://pax.grsecurity.net).
+
-+ If your toolchain does not support PT_PAX_FLAGS markings,
-+ you can create one in most cases with 'paxctl -C'.
++ If you have applications not marked by the PT_PAX_FLAGS ELF program
++ header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
++ support otherwise they will not get any protection.
++
++ If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
++ must make sure that the marks are the same if a binary has both marks.
+
+ Note that if you enable the legacy EI_PAX marking support as well,
+ the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
+
++config PAX_XATTR_PAX_FLAGS
++ bool 'Use filesystem extended attributes marking'
++ depends on EXPERT
++ select CIFS_XATTR if CIFS
++ select EXT2_FS_XATTR if EXT2_FS
++ select EXT3_FS_XATTR if EXT3_FS
++ select EXT4_FS_XATTR if EXT4_FS
++ select JFFS2_FS_XATTR if JFFS2_FS
++ select REISERFS_FS_XATTR if REISERFS_FS
++ select UBIFS_FS_XATTR if UBIFS_FS
++ help
++ Enabling this option will allow you to control PaX features on
++ a per executable basis via the 'setfattr' utility. The control
++ flags will be read from the user.pax.flags extended attribute of
++ the file. This marking has the benefit of supporting binary-only
++ applications that self-check themselves (e.g., skype) and would
++ not tolerate chpax/paxctl changes. The main drawback is that
++ extended attributes are not supported by some filesystems (e.g.,
++ isofs, squashfs, tmpfs, udf, vfat) so copying files through such
++ filesystems will lose the extended attributes and these PaX markings.
++
++ If you have applications not marked by the PT_PAX_FLAGS ELF program
++ header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
++ support otherwise they will not get any protection.
++
++ If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
++ must make sure that the marks are the same if a binary has both marks.
++
++ Note that if you enable the legacy EI_PAX marking support as well,
++ the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
++
+choice
+ prompt 'MAC system integration'
+ default PAX_HAVE_ACL_FLAGS
@@ -83666,7 +83987,7 @@ index fb363cd..14c747b 100644
+
+config PAX_NOEXEC
+ bool "Enforce non-executable pages"
-+ depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
++ depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
+ help
+ By design some architectures do not allow for protecting memory
+ pages against execution or even if they do, Linux does not make
@@ -83941,7 +84262,6 @@ index fb363cd..14c747b 100644
+
+config PAX_ASLR
+ bool "Address Space Layout Randomization"
-+ depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
+ help
+ Many if not most exploit techniques rely on the knowledge of
+ certain addresses in the attacked program. The following options
@@ -84061,10 +84381,10 @@ index fb363cd..14c747b 100644
+ before deploying it.
+
+ Note: full support for this feature requires gcc with plugin support
-+ so make sure your compiler is at least gcc 4.5.0 (cross compilation
-+ is not supported). Using older gcc versions means that functions
-+ with large enough stack frames may leave uninitialized memory behind
-+ that may be exposed to a later syscall leaking the stack.
++ so make sure your compiler is at least gcc 4.5.0. Using older gcc
++ versions means that functions with large enough stack frames may
++ leave uninitialized memory behind that may be exposed to a later
++ syscall leaking the stack.
+
+config PAX_MEMORY_UDEREF
+ bool "Prevent invalid userland pointer dereference"
@@ -84141,7 +84461,7 @@ index fb363cd..14c747b 100644
config KEYS
bool "Enable access key retention support"
help
-@@ -146,7 +727,7 @@ config INTEL_TXT
+@@ -146,7 +765,7 @@ config INTEL_TXT
config LSM_MMAP_MIN_ADDR
int "Low address space for LSM to protect from user allocation"
depends on SECURITY && SECURITY_SELINUX
@@ -84309,6 +84629,19 @@ index e031952..c9a535d 100644
goto error;
buflen -= tmp;
+diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
+index 931cfda..e71808a 100644
+--- a/security/keys/process_keys.c
++++ b/security/keys/process_keys.c
+@@ -208,7 +208,7 @@ static int install_process_keyring(void)
+ ret = install_process_keyring_to_cred(new);
+ if (ret < 0) {
+ abort_creds(new);
+- return ret != -EEXIST ?: 0;
++ return ret != -EEXIST ? ret : 0;
+ }
+
+ return commit_creds(new);
diff --git a/security/min_addr.c b/security/min_addr.c
index d9f9425..c28cef4 100644
--- a/security/min_addr.c
diff --git a/2.6.32/4425_grsec-pax-without-grsec.patch b/2.6.32/4425_grsec-pax-without-grsec.patch
index b5929f0..bbb8671 100644
--- a/2.6.32/4425_grsec-pax-without-grsec.patch
+++ b/2.6.32/4425_grsec-pax-without-grsec.patch
@@ -36,7 +36,7 @@ diff -Naur a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
diff -Naur a/fs/exec.c b/fs/exec.c
--- a/fs/exec.c 2011-04-17 18:15:55.000000000 -0400
+++ b/fs/exec.c 2011-04-17 18:29:40.000000000 -0400
-@@ -1803,9 +1803,11 @@
+@@ -1807,9 +1807,11 @@
}
up_read(&mm->mmap_sem);
}
@@ -48,7 +48,7 @@ diff -Naur a/fs/exec.c b/fs/exec.c
printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
"PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
-@@ -1820,10 +1822,12 @@
+@@ -1824,10 +1826,12 @@
#ifdef CONFIG_PAX_REFCOUNT
void pax_report_refcount_overflow(struct pt_regs *regs)
{
@@ -61,7 +61,7 @@ diff -Naur a/fs/exec.c b/fs/exec.c
printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
current->comm, task_pid_nr(current), current_uid(), current_euid());
print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
-@@ -1883,10 +1887,12 @@
+@@ -1887,10 +1891,12 @@
NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
{
diff --git a/2.6.32/4435_grsec-kconfig-gentoo.patch b/2.6.32/4435_grsec-kconfig-gentoo.patch
index 9097814..7c9be0d 100644
--- a/2.6.32/4435_grsec-kconfig-gentoo.patch
+++ b/2.6.32/4435_grsec-kconfig-gentoo.patch
@@ -290,7 +290,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
diff -Naur a/security/Kconfig b/security/Kconfig
--- a/security/Kconfig 2011-12-26 12:23:44.000000000 -0500
+++ b/security/Kconfig 2011-12-26 11:14:27.000000000 -0500
-@@ -322,9 +322,10 @@
+@@ -361,9 +361,10 @@
config PAX_KERNEXEC
bool "Enforce non-executable kernel pages"
@@ -302,7 +302,7 @@ diff -Naur a/security/Kconfig b/security/Kconfig
help
This is the kernel land equivalent of PAGEEXEC and MPROTECT,
that is, enabling this option will make it harder to inject
-@@ -335,30 +336,30 @@
+@@ -374,30 +375,30 @@
choice
prompt "Return Address Instrumentation Method"
@@ -341,7 +341,7 @@ diff -Naur a/security/Kconfig b/security/Kconfig
default ""
config PAX_KERNEXEC_MODULE_TEXT
-@@ -515,8 +516,9 @@
+@@ -553,8 +554,9 @@
config PAX_MEMORY_UDEREF
bool "Prevent invalid userland pointer dereference"
diff --git a/3.1.7/0000_README b/3.1.8/0000_README
index 6123813..9148cab 100644
--- a/3.1.7/0000_README
+++ b/3.1.8/0000_README
@@ -2,7 +2,11 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-2.2.2-3.1.7-201201032037.patch
+Patch: 1007_linux-3.1.8.patch
+From: http://www.kernel.org
+Desc: Linux 3.1.8
+
+Patch: 4420_grsecurity-2.2.2-3.1.8-201201111906.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.1.8/1007_linux-3.1.8.patch b/3.1.8/1007_linux-3.1.8.patch
new file mode 100644
index 0000000..5e3020a
--- /dev/null
+++ b/3.1.8/1007_linux-3.1.8.patch
@@ -0,0 +1,4111 @@
+diff --git a/Makefile b/Makefile
+index 96c48df..64a2e76 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 1
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = "Divemaster Edition"
+
+diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
+index 5a886cd..73a6a5b 100644
+--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
++++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
+@@ -193,7 +193,7 @@ static struct platform_device rx51_charger_device = {
+ static void __init rx51_charger_init(void)
+ {
+ WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO,
+- GPIOF_OUT_INIT_LOW, "isp1704_reset"));
++ GPIOF_OUT_INIT_HIGH, "isp1704_reset"));
+
+ platform_device_register(&rx51_charger_device);
+ }
+diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
+index c074e66..4e0a371 100644
+--- a/arch/arm/oprofile/common.c
++++ b/arch/arm/oprofile/common.c
+@@ -116,7 +116,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
+ return oprofile_perf_init(ops);
+ }
+
+-void __exit oprofile_arch_exit(void)
++void oprofile_arch_exit(void)
+ {
+ oprofile_perf_exit();
+ }
+diff --git a/arch/arm/plat-mxc/pwm.c b/arch/arm/plat-mxc/pwm.c
+index 761c3c9..8d4fdb0 100644
+--- a/arch/arm/plat-mxc/pwm.c
++++ b/arch/arm/plat-mxc/pwm.c
+@@ -32,6 +32,9 @@
+ #define MX3_PWMSAR 0x0C /* PWM Sample Register */
+ #define MX3_PWMPR 0x10 /* PWM Period Register */
+ #define MX3_PWMCR_PRESCALER(x) (((x - 1) & 0xFFF) << 4)
++#define MX3_PWMCR_DOZEEN (1 << 24)
++#define MX3_PWMCR_WAITEN (1 << 23)
++#define MX3_PWMCR_DBGEN (1 << 22)
+ #define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16)
+ #define MX3_PWMCR_CLKSRC_IPG (1 << 16)
+ #define MX3_PWMCR_EN (1 << 0)
+@@ -74,10 +77,21 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
+ do_div(c, period_ns);
+ duty_cycles = c;
+
++ /*
++ * according to imx pwm RM, the real period value should be
++ * PERIOD value in PWMPR plus 2.
++ */
++ if (period_cycles > 2)
++ period_cycles -= 2;
++ else
++ period_cycles = 0;
++
+ writel(duty_cycles, pwm->mmio_base + MX3_PWMSAR);
+ writel(period_cycles, pwm->mmio_base + MX3_PWMPR);
+
+- cr = MX3_PWMCR_PRESCALER(prescale) | MX3_PWMCR_EN;
++ cr = MX3_PWMCR_PRESCALER(prescale) |
++ MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
++ MX3_PWMCR_DBGEN | MX3_PWMCR_EN;
+
+ if (cpu_is_mx25())
+ cr |= MX3_PWMCR_CLKSRC_IPG;
+diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
+index 6efc18b..bd58b72 100644
+--- a/arch/s390/oprofile/init.c
++++ b/arch/s390/oprofile/init.c
+@@ -88,7 +88,7 @@ static ssize_t hwsampler_write(struct file *file, char const __user *buf,
+ return -EINVAL;
+
+ retval = oprofilefs_ulong_from_user(&val, buf, count);
+- if (retval)
++ if (retval <= 0)
+ return retval;
+
+ if (oprofile_started)
+diff --git a/arch/sh/oprofile/common.c b/arch/sh/oprofile/common.c
+index b4c2d2b..e4dd5d5 100644
+--- a/arch/sh/oprofile/common.c
++++ b/arch/sh/oprofile/common.c
+@@ -49,7 +49,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
+ return oprofile_perf_init(ops);
+ }
+
+-void __exit oprofile_arch_exit(void)
++void oprofile_arch_exit(void)
+ {
+ oprofile_perf_exit();
+ kfree(sh_pmu_op_name);
+@@ -60,5 +60,5 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
+ ops->backtrace = sh_backtrace;
+ return -ENODEV;
+ }
+-void __exit oprofile_arch_exit(void) {}
++void oprofile_arch_exit(void) {}
+ #endif /* CONFIG_HW_PERF_EVENTS */
+diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
+index 5b31a8e..a790cc6 100644
+--- a/arch/sparc/include/asm/pgtable_32.h
++++ b/arch/sparc/include/asm/pgtable_32.h
+@@ -431,10 +431,6 @@ extern unsigned long *sparc_valid_addr_bitmap;
+ #define kern_addr_valid(addr) \
+ (test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))
+
+-extern int io_remap_pfn_range(struct vm_area_struct *vma,
+- unsigned long from, unsigned long pfn,
+- unsigned long size, pgprot_t prot);
+-
+ /*
+ * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
+ * its high 4 bits. These macros/functions put it there or get it from there.
+@@ -443,6 +439,22 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma,
+ #define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
+ #define GET_PFN(pfn) (pfn & 0x0fffffffUL)
+
++extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
++ unsigned long, pgprot_t);
++
++static inline int io_remap_pfn_range(struct vm_area_struct *vma,
++ unsigned long from, unsigned long pfn,
++ unsigned long size, pgprot_t prot)
++{
++ unsigned long long offset, space, phys_base;
++
++ offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
++ space = GET_IOSPACE(pfn);
++ phys_base = offset | (space << 32ULL);
++
++ return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
++}
++
+ #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+ #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+ ({ \
+diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
+index adf8932..38ebb2c 100644
+--- a/arch/sparc/include/asm/pgtable_64.h
++++ b/arch/sparc/include/asm/pgtable_64.h
+@@ -757,10 +757,6 @@ static inline bool kern_addr_valid(unsigned long addr)
+
+ extern int page_in_phys_avail(unsigned long paddr);
+
+-extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
+- unsigned long pfn,
+- unsigned long size, pgprot_t prot);
+-
+ /*
+ * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
+ * its high 4 bits. These macros/functions put it there or get it from there.
+@@ -769,6 +765,22 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
+ #define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
+ #define GET_PFN(pfn) (pfn & 0x0fffffffffffffffUL)
+
++extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
++ unsigned long, pgprot_t);
++
++static inline int io_remap_pfn_range(struct vm_area_struct *vma,
++ unsigned long from, unsigned long pfn,
++ unsigned long size, pgprot_t prot)
++{
++ unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
++ int space = GET_IOSPACE(pfn);
++ unsigned long phys_base;
++
++ phys_base = offset | (((unsigned long) space) << 32UL);
++
++ return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
++}
++
+ #include <asm-generic/pgtable.h>
+
+ /* We provide our own get_unmapped_area to cope with VA holes and
+diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
+index e27f8ea..0c218e4 100644
+--- a/arch/sparc/kernel/entry.h
++++ b/arch/sparc/kernel/entry.h
+@@ -42,6 +42,9 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
+ extern void fpload(unsigned long *fpregs, unsigned long *fsr);
+
+ #else /* CONFIG_SPARC32 */
++
++#include <asm/trap_block.h>
++
+ struct popc_3insn_patch_entry {
+ unsigned int addr;
+ unsigned int insns[3];
+@@ -57,6 +60,10 @@ extern struct popc_6insn_patch_entry __popc_6insn_patch,
+ __popc_6insn_patch_end;
+
+ extern void __init per_cpu_patch(void);
++extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
++ struct sun4v_1insn_patch_entry *);
++extern void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
++ struct sun4v_2insn_patch_entry *);
+ extern void __init sun4v_patch(void);
+ extern void __init boot_cpu_id_too_large(int cpu);
+ extern unsigned int dcache_parity_tl1_occurred;
+diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
+index da0c6c7..e551987 100644
+--- a/arch/sparc/kernel/module.c
++++ b/arch/sparc/kernel/module.c
+@@ -17,6 +17,8 @@
+ #include <asm/processor.h>
+ #include <asm/spitfire.h>
+
++#include "entry.h"
++
+ #ifdef CONFIG_SPARC64
+
+ #include <linux/jump_label.h>
+@@ -203,6 +205,29 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
+ }
+
+ #ifdef CONFIG_SPARC64
++static void do_patch_sections(const Elf_Ehdr *hdr,
++ const Elf_Shdr *sechdrs)
++{
++ const Elf_Shdr *s, *sun4v_1insn = NULL, *sun4v_2insn = NULL;
++ char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
++
++ for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
++ if (!strcmp(".sun4v_1insn_patch", secstrings + s->sh_name))
++ sun4v_1insn = s;
++ if (!strcmp(".sun4v_2insn_patch", secstrings + s->sh_name))
++ sun4v_2insn = s;
++ }
++
++ if (sun4v_1insn && tlb_type == hypervisor) {
++ void *p = (void *) sun4v_1insn->sh_addr;
++ sun4v_patch_1insn_range(p, p + sun4v_1insn->sh_size);
++ }
++ if (sun4v_2insn && tlb_type == hypervisor) {
++ void *p = (void *) sun4v_2insn->sh_addr;
++ sun4v_patch_2insn_range(p, p + sun4v_2insn->sh_size);
++ }
++}
++
+ int module_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *me)
+@@ -210,6 +235,8 @@ int module_finalize(const Elf_Ehdr *hdr,
+ /* make jump label nops */
+ jump_label_apply_nops(me);
+
++ do_patch_sections(hdr, sechdrs);
++
+ /* Cheetah's I-cache is fully coherent. */
+ if (tlb_type == spitfire) {
+ unsigned long va;
+diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
+index b01a06e..9e73c4a 100644
+--- a/arch/sparc/kernel/pci_sun4v.c
++++ b/arch/sparc/kernel/pci_sun4v.c
+@@ -848,10 +848,10 @@ static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
+ if (!irq)
+ return -ENOMEM;
+
+- if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
+- return -EINVAL;
+ if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
+ return -EINVAL;
++ if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
++ return -EINVAL;
+
+ return irq;
+ }
+diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
+index c965595a..a854a1c 100644
+--- a/arch/sparc/kernel/setup_64.c
++++ b/arch/sparc/kernel/setup_64.c
+@@ -234,40 +234,50 @@ void __init per_cpu_patch(void)
+ }
+ }
+
+-void __init sun4v_patch(void)
++void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *start,
++ struct sun4v_1insn_patch_entry *end)
+ {
+- extern void sun4v_hvapi_init(void);
+- struct sun4v_1insn_patch_entry *p1;
+- struct sun4v_2insn_patch_entry *p2;
+-
+- if (tlb_type != hypervisor)
+- return;
++ while (start < end) {
++ unsigned long addr = start->addr;
+
+- p1 = &__sun4v_1insn_patch;
+- while (p1 < &__sun4v_1insn_patch_end) {
+- unsigned long addr = p1->addr;
+-
+- *(unsigned int *) (addr + 0) = p1->insn;
++ *(unsigned int *) (addr + 0) = start->insn;
+ wmb();
+ __asm__ __volatile__("flush %0" : : "r" (addr + 0));
+
+- p1++;
++ start++;
+ }
++}
+
+- p2 = &__sun4v_2insn_patch;
+- while (p2 < &__sun4v_2insn_patch_end) {
+- unsigned long addr = p2->addr;
++void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
++ struct sun4v_2insn_patch_entry *end)
++{
++ while (start < end) {
++ unsigned long addr = start->addr;
+
+- *(unsigned int *) (addr + 0) = p2->insns[0];
++ *(unsigned int *) (addr + 0) = start->insns[0];
+ wmb();
+ __asm__ __volatile__("flush %0" : : "r" (addr + 0));
+
+- *(unsigned int *) (addr + 4) = p2->insns[1];
++ *(unsigned int *) (addr + 4) = start->insns[1];
+ wmb();
+ __asm__ __volatile__("flush %0" : : "r" (addr + 4));
+
+- p2++;
++ start++;
+ }
++}
++
++void __init sun4v_patch(void)
++{
++ extern void sun4v_hvapi_init(void);
++
++ if (tlb_type != hypervisor)
++ return;
++
++ sun4v_patch_1insn_range(&__sun4v_1insn_patch,
++ &__sun4v_1insn_patch_end);
++
++ sun4v_patch_2insn_range(&__sun4v_2insn_patch,
++ &__sun4v_2insn_patch_end);
+
+ sun4v_hvapi_init();
+ }
+diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
+index 2caa556..023b886 100644
+--- a/arch/sparc/kernel/signal32.c
++++ b/arch/sparc/kernel/signal32.c
+@@ -822,21 +822,23 @@ static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ */
+-void do_signal32(sigset_t *oldset, struct pt_regs * regs,
+- int restart_syscall, unsigned long orig_i0)
++void do_signal32(sigset_t *oldset, struct pt_regs * regs)
+ {
+ struct k_sigaction ka;
++ unsigned long orig_i0;
++ int restart_syscall;
+ siginfo_t info;
+ int signr;
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+
+- /* If the debugger messes with the program counter, it clears
+- * the "in syscall" bit, directing us to not perform a syscall
+- * restart.
+- */
+- if (restart_syscall && !pt_regs_is_syscall(regs))
+- restart_syscall = 0;
++ restart_syscall = 0;
++ orig_i0 = 0;
++ if (pt_regs_is_syscall(regs) &&
++ (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
++ restart_syscall = 1;
++ orig_i0 = regs->u_regs[UREG_G6];
++ }
+
+ if (signr > 0) {
+ if (restart_syscall)
+diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
+index 8ce247a..d54c6e5 100644
+--- a/arch/sparc/kernel/signal_32.c
++++ b/arch/sparc/kernel/signal_32.c
+@@ -519,10 +519,26 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
+ siginfo_t info;
+ int signr;
+
++ /* It's a lot of work and synchronization to add a new ptrace
++ * register for GDB to save and restore in order to get
++ * orig_i0 correct for syscall restarts when debugging.
++ *
++ * Although it should be the case that most of the global
++ * registers are volatile across a system call, glibc already
++ * depends upon that fact that we preserve them. So we can't
++ * just use any global register to save away the orig_i0 value.
++ *
++ * In particular %g2, %g3, %g4, and %g5 are all assumed to be
++ * preserved across a system call trap by various pieces of
++ * code in glibc.
++ *
++ * %g7 is used as the "thread register". %g6 is not used in
++ * any fixed manner. %g6 is used as a scratch register and
++ * a compiler temporary, but it's value is never used across
++ * a system call. Therefore %g6 is usable for orig_i0 storage.
++ */
+ if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C))
+- restart_syscall = 1;
+- else
+- restart_syscall = 0;
++ regs->u_regs[UREG_G6] = orig_i0;
+
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ oldset = &current->saved_sigmask;
+@@ -535,8 +551,12 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
+ * the software "in syscall" bit, directing us to not perform
+ * a syscall restart.
+ */
+- if (restart_syscall && !pt_regs_is_syscall(regs))
+- restart_syscall = 0;
++ restart_syscall = 0;
++ if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C)) {
++ restart_syscall = 1;
++ orig_i0 = regs->u_regs[UREG_G6];
++ }
++
+
+ if (signr > 0) {
+ if (restart_syscall)
+diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
+index a2b8159..f0836cd 100644
+--- a/arch/sparc/kernel/signal_64.c
++++ b/arch/sparc/kernel/signal_64.c
+@@ -529,11 +529,27 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
+ siginfo_t info;
+ int signr;
+
++ /* It's a lot of work and synchronization to add a new ptrace
++ * register for GDB to save and restore in order to get
++ * orig_i0 correct for syscall restarts when debugging.
++ *
++ * Although it should be the case that most of the global
++ * registers are volatile across a system call, glibc already
++ * depends upon that fact that we preserve them. So we can't
++ * just use any global register to save away the orig_i0 value.
++ *
++ * In particular %g2, %g3, %g4, and %g5 are all assumed to be
++ * preserved across a system call trap by various pieces of
++ * code in glibc.
++ *
++ * %g7 is used as the "thread register". %g6 is not used in
++ * any fixed manner. %g6 is used as a scratch register and
++ * a compiler temporary, but it's value is never used across
++ * a system call. Therefore %g6 is usable for orig_i0 storage.
++ */
+ if (pt_regs_is_syscall(regs) &&
+- (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
+- restart_syscall = 1;
+- } else
+- restart_syscall = 0;
++ (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY)))
++ regs->u_regs[UREG_G6] = orig_i0;
+
+ if (current_thread_info()->status & TS_RESTORE_SIGMASK)
+ oldset = &current->saved_sigmask;
+@@ -542,22 +558,20 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
+
+ #ifdef CONFIG_COMPAT
+ if (test_thread_flag(TIF_32BIT)) {
+- extern void do_signal32(sigset_t *, struct pt_regs *,
+- int restart_syscall,
+- unsigned long orig_i0);
+- do_signal32(oldset, regs, restart_syscall, orig_i0);
++ extern void do_signal32(sigset_t *, struct pt_regs *);
++ do_signal32(oldset, regs);
+ return;
+ }
+ #endif
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+
+- /* If the debugger messes with the program counter, it clears
+- * the software "in syscall" bit, directing us to not perform
+- * a syscall restart.
+- */
+- if (restart_syscall && !pt_regs_is_syscall(regs))
+- restart_syscall = 0;
++ restart_syscall = 0;
++ if (pt_regs_is_syscall(regs) &&
++ (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
++ restart_syscall = 1;
++ orig_i0 = regs->u_regs[UREG_G6];
++ }
+
+ if (signr > 0) {
+ if (restart_syscall)
+diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c
+index 32b626c..7337067 100644
+--- a/arch/sparc/kernel/visemul.c
++++ b/arch/sparc/kernel/visemul.c
+@@ -713,17 +713,17 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
+ s16 b = (rs2 >> (i * 16)) & 0xffff;
+
+ if (a > b)
+- rd_val |= 1 << i;
++ rd_val |= 8 >> i;
+ }
+ break;
+
+ case FCMPGT32_OPF:
+ for (i = 0; i < 2; i++) {
+- s32 a = (rs1 >> (i * 32)) & 0xffff;
+- s32 b = (rs2 >> (i * 32)) & 0xffff;
++ s32 a = (rs1 >> (i * 32)) & 0xffffffff;
++ s32 b = (rs2 >> (i * 32)) & 0xffffffff;
+
+ if (a > b)
+- rd_val |= 1 << i;
++ rd_val |= 2 >> i;
+ }
+ break;
+
+@@ -733,17 +733,17 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
+ s16 b = (rs2 >> (i * 16)) & 0xffff;
+
+ if (a <= b)
+- rd_val |= 1 << i;
++ rd_val |= 8 >> i;
+ }
+ break;
+
+ case FCMPLE32_OPF:
+ for (i = 0; i < 2; i++) {
+- s32 a = (rs1 >> (i * 32)) & 0xffff;
+- s32 b = (rs2 >> (i * 32)) & 0xffff;
++ s32 a = (rs1 >> (i * 32)) & 0xffffffff;
++ s32 b = (rs2 >> (i * 32)) & 0xffffffff;
+
+ if (a <= b)
+- rd_val |= 1 << i;
++ rd_val |= 2 >> i;
+ }
+ break;
+
+@@ -753,17 +753,17 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
+ s16 b = (rs2 >> (i * 16)) & 0xffff;
+
+ if (a != b)
+- rd_val |= 1 << i;
++ rd_val |= 8 >> i;
+ }
+ break;
+
+ case FCMPNE32_OPF:
+ for (i = 0; i < 2; i++) {
+- s32 a = (rs1 >> (i * 32)) & 0xffff;
+- s32 b = (rs2 >> (i * 32)) & 0xffff;
++ s32 a = (rs1 >> (i * 32)) & 0xffffffff;
++ s32 b = (rs2 >> (i * 32)) & 0xffffffff;
+
+ if (a != b)
+- rd_val |= 1 << i;
++ rd_val |= 2 >> i;
+ }
+ break;
+
+@@ -773,17 +773,17 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
+ s16 b = (rs2 >> (i * 16)) & 0xffff;
+
+ if (a == b)
+- rd_val |= 1 << i;
++ rd_val |= 8 >> i;
+ }
+ break;
+
+ case FCMPEQ32_OPF:
+ for (i = 0; i < 2; i++) {
+- s32 a = (rs1 >> (i * 32)) & 0xffff;
+- s32 b = (rs2 >> (i * 32)) & 0xffff;
++ s32 a = (rs1 >> (i * 32)) & 0xffffffff;
++ s32 b = (rs2 >> (i * 32)) & 0xffffffff;
+
+ if (a == b)
+- rd_val |= 1 << i;
++ rd_val |= 2 >> i;
+ }
+ break;
+ }
+diff --git a/arch/sparc/lib/memcpy.S b/arch/sparc/lib/memcpy.S
+index 34fe657..4d8c497 100644
+--- a/arch/sparc/lib/memcpy.S
++++ b/arch/sparc/lib/memcpy.S
+@@ -7,40 +7,12 @@
+ * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+-#ifdef __KERNEL__
+-
+-#define FUNC(x) \
++#define FUNC(x) \
+ .globl x; \
+ .type x,@function; \
+- .align 4; \
++ .align 4; \
+ x:
+
+-#undef FASTER_REVERSE
+-#undef FASTER_NONALIGNED
+-#define FASTER_ALIGNED
+-
+-/* In kernel these functions don't return a value.
+- * One should use macros in asm/string.h for that purpose.
+- * We return 0, so that bugs are more apparent.
+- */
+-#define SETUP_RETL
+-#define RETL_INSN clr %o0
+-
+-#else
+-
+-/* libc */
+-
+-#include "DEFS.h"
+-
+-#define FASTER_REVERSE
+-#define FASTER_NONALIGNED
+-#define FASTER_ALIGNED
+-
+-#define SETUP_RETL mov %o0, %g6
+-#define RETL_INSN mov %g6, %o0
+-
+-#endif
+-
+ /* Both these macros have to start with exactly the same insn */
+ #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
+ ldd [%src + (offset) + 0x00], %t0; \
+@@ -164,30 +136,6 @@ x:
+ .text
+ .align 4
+
+-#ifdef FASTER_REVERSE
+-
+-70: /* rdword_align */
+-
+- andcc %o1, 1, %g0
+- be 4f
+- andcc %o1, 2, %g0
+-
+- ldub [%o1 - 1], %g2
+- sub %o1, 1, %o1
+- stb %g2, [%o0 - 1]
+- sub %o2, 1, %o2
+- be 3f
+- sub %o0, 1, %o0
+-4:
+- lduh [%o1 - 2], %g2
+- sub %o1, 2, %o1
+- sth %g2, [%o0 - 2]
+- sub %o2, 2, %o2
+- b 3f
+- sub %o0, 2, %o0
+-
+-#endif /* FASTER_REVERSE */
+-
+ 0:
+ retl
+ nop ! Only bcopy returns here and it retuns void...
+@@ -198,7 +146,7 @@ FUNC(__memmove)
+ #endif
+ FUNC(memmove)
+ cmp %o0, %o1
+- SETUP_RETL
++ mov %o0, %g7
+ bleu 9f
+ sub %o0, %o1, %o4
+
+@@ -207,8 +155,6 @@ FUNC(memmove)
+ bleu 0f
+ andcc %o4, 3, %o5
+
+-#ifndef FASTER_REVERSE
+-
+ add %o1, %o2, %o1
+ add %o0, %o2, %o0
+ sub %o1, 1, %o1
+@@ -224,295 +170,7 @@ FUNC(memmove)
+ sub %o0, 1, %o0
+
+ retl
+- RETL_INSN
+-
+-#else /* FASTER_REVERSE */
+-
+- add %o1, %o2, %o1
+- add %o0, %o2, %o0
+- bne 77f
+- cmp %o2, 15
+- bleu 91f
+- andcc %o1, 3, %g0
+- bne 70b
+-3:
+- andcc %o1, 4, %g0
+-
+- be 2f
+- mov %o2, %g1
+-
+- ld [%o1 - 4], %o4
+- sub %g1, 4, %g1
+- st %o4, [%o0 - 4]
+- sub %o1, 4, %o1
+- sub %o0, 4, %o0
+-2:
+- andcc %g1, 0xffffff80, %g7
+- be 3f
+- andcc %o0, 4, %g0
+-
+- be 74f + 4
+-5:
+- RMOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
+- RMOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
+- RMOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
+- RMOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
+- subcc %g7, 128, %g7
+- sub %o1, 128, %o1
+- bne 5b
+- sub %o0, 128, %o0
+-3:
+- andcc %g1, 0x70, %g7
+- be 72f
+- andcc %g1, 8, %g0
+-
+- sethi %hi(72f), %o5
+- srl %g7, 1, %o4
+- add %g7, %o4, %o4
+- sub %o1, %g7, %o1
+- sub %o5, %o4, %o5
+- jmpl %o5 + %lo(72f), %g0
+- sub %o0, %g7, %o0
+-
+-71: /* rmemcpy_table */
+- RMOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
+- RMOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
+- RMOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
+- RMOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
+- RMOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
+- RMOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
+- RMOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
+-
+-72: /* rmemcpy_table_end */
+-
+- be 73f
+- andcc %g1, 4, %g0
+-
+- ldd [%o1 - 0x08], %g2
+- sub %o0, 8, %o0
+- sub %o1, 8, %o1
+- st %g2, [%o0]
+- st %g3, [%o0 + 0x04]
+-
+-73: /* rmemcpy_last7 */
+-
+- be 1f
+- andcc %g1, 2, %g0
+-
+- ld [%o1 - 4], %g2
+- sub %o1, 4, %o1
+- st %g2, [%o0 - 4]
+- sub %o0, 4, %o0
+-1:
+- be 1f
+- andcc %g1, 1, %g0
+-
+- lduh [%o1 - 2], %g2
+- sub %o1, 2, %o1
+- sth %g2, [%o0 - 2]
+- sub %o0, 2, %o0
+-1:
+- be 1f
+- nop
+-
+- ldub [%o1 - 1], %g2
+- stb %g2, [%o0 - 1]
+-1:
+- retl
+- RETL_INSN
+-
+-74: /* rldd_std */
+- RMOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
+- RMOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
+- RMOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
+- RMOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
+- subcc %g7, 128, %g7
+- sub %o1, 128, %o1
+- bne 74b
+- sub %o0, 128, %o0
+-
+- andcc %g1, 0x70, %g7
+- be 72b
+- andcc %g1, 8, %g0
+-
+- sethi %hi(72b), %o5
+- srl %g7, 1, %o4
+- add %g7, %o4, %o4
+- sub %o1, %g7, %o1
+- sub %o5, %o4, %o5
+- jmpl %o5 + %lo(72b), %g0
+- sub %o0, %g7, %o0
+-
+-75: /* rshort_end */
+-
+- and %o2, 0xe, %o3
+-2:
+- sethi %hi(76f), %o5
+- sll %o3, 3, %o4
+- sub %o0, %o3, %o0
+- sub %o5, %o4, %o5
+- sub %o1, %o3, %o1
+- jmpl %o5 + %lo(76f), %g0
+- andcc %o2, 1, %g0
+-
+- RMOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
+- RMOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
+- RMOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
+- RMOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
+- RMOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
+- RMOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
+- RMOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)
+-
+-76: /* rshort_table_end */
+-
+- be 1f
+- nop
+- ldub [%o1 - 1], %g2
+- stb %g2, [%o0 - 1]
+-1:
+- retl
+- RETL_INSN
+-
+-91: /* rshort_aligned_end */
+-
+- bne 75b
+- andcc %o2, 8, %g0
+-
+- be 1f
+- andcc %o2, 4, %g0
+-
+- ld [%o1 - 0x08], %g2
+- ld [%o1 - 0x04], %g3
+- sub %o1, 8, %o1
+- st %g2, [%o0 - 0x08]
+- st %g3, [%o0 - 0x04]
+- sub %o0, 8, %o0
+-1:
+- b 73b
+- mov %o2, %g1
+-
+-77: /* rnon_aligned */
+- cmp %o2, 15
+- bleu 75b
+- andcc %o0, 3, %g0
+- be 64f
+- andcc %o0, 1, %g0
+- be 63f
+- andcc %o0, 2, %g0
+- ldub [%o1 - 1], %g5
+- sub %o1, 1, %o1
+- stb %g5, [%o0 - 1]
+- sub %o0, 1, %o0
+- be 64f
+- sub %o2, 1, %o2
+-63:
+- ldub [%o1 - 1], %g5
+- sub %o1, 2, %o1
+- stb %g5, [%o0 - 1]
+- sub %o0, 2, %o0
+- ldub [%o1], %g5
+- sub %o2, 2, %o2
+- stb %g5, [%o0]
+-64:
+- and %o1, 3, %g2
+- and %o1, -4, %o1
+- and %o2, 0xc, %g3
+- add %o1, 4, %o1
+- cmp %g3, 4
+- sll %g2, 3, %g4
+- mov 32, %g2
+- be 4f
+- sub %g2, %g4, %g7
+-
+- blu 3f
+- cmp %g3, 8
+-
+- be 2f
+- srl %o2, 2, %g3
+-
+- ld [%o1 - 4], %o3
+- add %o0, -8, %o0
+- ld [%o1 - 8], %o4
+- add %o1, -16, %o1
+- b 7f
+- add %g3, 1, %g3
+-2:
+- ld [%o1 - 4], %o4
+- add %o0, -4, %o0
+- ld [%o1 - 8], %g1
+- add %o1, -12, %o1
+- b 8f
+- add %g3, 2, %g3
+-3:
+- ld [%o1 - 4], %o5
+- add %o0, -12, %o0
+- ld [%o1 - 8], %o3
+- add %o1, -20, %o1
+- b 6f
+- srl %o2, 2, %g3
+-4:
+- ld [%o1 - 4], %g1
+- srl %o2, 2, %g3
+- ld [%o1 - 8], %o5
+- add %o1, -24, %o1
+- add %o0, -16, %o0
+- add %g3, -1, %g3
+-
+- ld [%o1 + 12], %o3
+-5:
+- sll %o5, %g4, %g2
+- srl %g1, %g7, %g5
+- or %g2, %g5, %g2
+- st %g2, [%o0 + 12]
+-6:
+- ld [%o1 + 8], %o4
+- sll %o3, %g4, %g2
+- srl %o5, %g7, %g5
+- or %g2, %g5, %g2
+- st %g2, [%o0 + 8]
+-7:
+- ld [%o1 + 4], %g1
+- sll %o4, %g4, %g2
+- srl %o3, %g7, %g5
+- or %g2, %g5, %g2
+- st %g2, [%o0 + 4]
+-8:
+- ld [%o1], %o5
+- sll %g1, %g4, %g2
+- srl %o4, %g7, %g5
+- addcc %g3, -4, %g3
+- or %g2, %g5, %g2
+- add %o1, -16, %o1
+- st %g2, [%o0]
+- add %o0, -16, %o0
+- bne,a 5b
+- ld [%o1 + 12], %o3
+- sll %o5, %g4, %g2
+- srl %g1, %g7, %g5
+- srl %g4, 3, %g3
+- or %g2, %g5, %g2
+- add %o1, %g3, %o1
+- andcc %o2, 2, %g0
+- st %g2, [%o0 + 12]
+- be 1f
+- andcc %o2, 1, %g0
+-
+- ldub [%o1 + 15], %g5
+- add %o1, -2, %o1
+- stb %g5, [%o0 + 11]
+- add %o0, -2, %o0
+- ldub [%o1 + 16], %g5
+- stb %g5, [%o0 + 12]
+-1:
+- be 1f
+- nop
+- ldub [%o1 + 15], %g5
+- stb %g5, [%o0 + 11]
+-1:
+- retl
+- RETL_INSN
+-
+-#endif /* FASTER_REVERSE */
++ mov %g7, %o0
+
+ /* NOTE: This code is executed just for the cases,
+ where %src (=%o1) & 3 is != 0.
+@@ -546,7 +204,7 @@ FUNC(memmove)
+ FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
+
+ sub %o0, %o1, %o4
+- SETUP_RETL
++ mov %o0, %g7
+ 9:
+ andcc %o4, 3, %o5
+ 0:
+@@ -569,7 +227,7 @@ FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
+ add %o1, 4, %o1
+ add %o0, 4, %o0
+ 2:
+- andcc %g1, 0xffffff80, %g7
++ andcc %g1, 0xffffff80, %g0
+ be 3f
+ andcc %o0, 4, %g0
+
+@@ -579,22 +237,23 @@ FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
+ MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
+ MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
+ MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
+- subcc %g7, 128, %g7
++ sub %g1, 128, %g1
+ add %o1, 128, %o1
+- bne 5b
++ cmp %g1, 128
++ bge 5b
+ add %o0, 128, %o0
+ 3:
+- andcc %g1, 0x70, %g7
++ andcc %g1, 0x70, %g4
+ be 80f
+ andcc %g1, 8, %g0
+
+ sethi %hi(80f), %o5
+- srl %g7, 1, %o4
+- add %g7, %o4, %o4
+- add %o1, %g7, %o1
++ srl %g4, 1, %o4
++ add %g4, %o4, %o4
++ add %o1, %g4, %o1
+ sub %o5, %o4, %o5
+ jmpl %o5 + %lo(80f), %g0
+- add %o0, %g7, %o0
++ add %o0, %g4, %o0
+
+ 79: /* memcpy_table */
+
+@@ -641,43 +300,28 @@ FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
+ stb %g2, [%o0]
+ 1:
+ retl
+- RETL_INSN
++ mov %g7, %o0
+
+ 82: /* ldd_std */
+ MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
+ MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
+ MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
+ MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
+- subcc %g7, 128, %g7
++ subcc %g1, 128, %g1
+ add %o1, 128, %o1
+- bne 82b
++ cmp %g1, 128
++ bge 82b
+ add %o0, 128, %o0
+
+-#ifndef FASTER_ALIGNED
+-
+- andcc %g1, 0x70, %g7
+- be 80b
+- andcc %g1, 8, %g0
+-
+- sethi %hi(80b), %o5
+- srl %g7, 1, %o4
+- add %g7, %o4, %o4
+- add %o1, %g7, %o1
+- sub %o5, %o4, %o5
+- jmpl %o5 + %lo(80b), %g0
+- add %o0, %g7, %o0
+-
+-#else /* FASTER_ALIGNED */
+-
+- andcc %g1, 0x70, %g7
++ andcc %g1, 0x70, %g4
+ be 84f
+ andcc %g1, 8, %g0
+
+ sethi %hi(84f), %o5
+- add %o1, %g7, %o1
+- sub %o5, %g7, %o5
++ add %o1, %g4, %o1
++ sub %o5, %g4, %o5
+ jmpl %o5 + %lo(84f), %g0
+- add %o0, %g7, %o0
++ add %o0, %g4, %o0
+
+ 83: /* amemcpy_table */
+
+@@ -721,382 +365,132 @@ FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
+ stb %g2, [%o0]
+ 1:
+ retl
+- RETL_INSN
+-
+-#endif /* FASTER_ALIGNED */
++ mov %g7, %o0
+
+ 86: /* non_aligned */
+ cmp %o2, 6
+ bleu 88f
++ nop
+
+-#ifdef FASTER_NONALIGNED
+-
+- cmp %o2, 256
+- bcc 87f
+-
+-#endif /* FASTER_NONALIGNED */
+-
+- andcc %o0, 3, %g0
++ save %sp, -96, %sp
++ andcc %i0, 3, %g0
+ be 61f
+- andcc %o0, 1, %g0
++ andcc %i0, 1, %g0
+ be 60f
+- andcc %o0, 2, %g0
++ andcc %i0, 2, %g0
+
+- ldub [%o1], %g5
+- add %o1, 1, %o1
+- stb %g5, [%o0]
+- sub %o2, 1, %o2
++ ldub [%i1], %g5
++ add %i1, 1, %i1
++ stb %g5, [%i0]
++ sub %i2, 1, %i2
+ bne 61f
+- add %o0, 1, %o0
++ add %i0, 1, %i0
+ 60:
+- ldub [%o1], %g3
+- add %o1, 2, %o1
+- stb %g3, [%o0]
+- sub %o2, 2, %o2
+- ldub [%o1 - 1], %g3
+- add %o0, 2, %o0
+- stb %g3, [%o0 - 1]
++ ldub [%i1], %g3
++ add %i1, 2, %i1
++ stb %g3, [%i0]
++ sub %i2, 2, %i2
++ ldub [%i1 - 1], %g3
++ add %i0, 2, %i0
++ stb %g3, [%i0 - 1]
+ 61:
+- and %o1, 3, %g2
+- and %o2, 0xc, %g3
+- and %o1, -4, %o1
++ and %i1, 3, %g2
++ and %i2, 0xc, %g3
++ and %i1, -4, %i1
+ cmp %g3, 4
+ sll %g2, 3, %g4
+ mov 32, %g2
+ be 4f
+- sub %g2, %g4, %g7
++ sub %g2, %g4, %l0
+
+ blu 3f
+ cmp %g3, 0x8
+
+ be 2f
+- srl %o2, 2, %g3
++ srl %i2, 2, %g3
+
+- ld [%o1], %o3
+- add %o0, -8, %o0
+- ld [%o1 + 4], %o4
++ ld [%i1], %i3
++ add %i0, -8, %i0
++ ld [%i1 + 4], %i4
+ b 8f
+ add %g3, 1, %g3
+ 2:
+- ld [%o1], %o4
+- add %o0, -12, %o0
+- ld [%o1 + 4], %o5
++ ld [%i1], %i4
++ add %i0, -12, %i0
++ ld [%i1 + 4], %i5
+ add %g3, 2, %g3
+ b 9f
+- add %o1, -4, %o1
++ add %i1, -4, %i1
+ 3:
+- ld [%o1], %g1
+- add %o0, -4, %o0
+- ld [%o1 + 4], %o3
+- srl %o2, 2, %g3
++ ld [%i1], %g1
++ add %i0, -4, %i0
++ ld [%i1 + 4], %i3
++ srl %i2, 2, %g3
+ b 7f
+- add %o1, 4, %o1
++ add %i1, 4, %i1
+ 4:
+- ld [%o1], %o5
+- cmp %o2, 7
+- ld [%o1 + 4], %g1
+- srl %o2, 2, %g3
++ ld [%i1], %i5
++ cmp %i2, 7
++ ld [%i1 + 4], %g1
++ srl %i2, 2, %g3
+ bleu 10f
+- add %o1, 8, %o1
++ add %i1, 8, %i1
+
+- ld [%o1], %o3
++ ld [%i1], %i3
+ add %g3, -1, %g3
+ 5:
+- sll %o5, %g4, %g2
+- srl %g1, %g7, %g5
++ sll %i5, %g4, %g2
++ srl %g1, %l0, %g5
+ or %g2, %g5, %g2
+- st %g2, [%o0]
++ st %g2, [%i0]
+ 7:
+- ld [%o1 + 4], %o4
++ ld [%i1 + 4], %i4
+ sll %g1, %g4, %g2
+- srl %o3, %g7, %g5
++ srl %i3, %l0, %g5
+ or %g2, %g5, %g2
+- st %g2, [%o0 + 4]
++ st %g2, [%i0 + 4]
+ 8:
+- ld [%o1 + 8], %o5
+- sll %o3, %g4, %g2
+- srl %o4, %g7, %g5
++ ld [%i1 + 8], %i5
++ sll %i3, %g4, %g2
++ srl %i4, %l0, %g5
+ or %g2, %g5, %g2
+- st %g2, [%o0 + 8]
++ st %g2, [%i0 + 8]
+ 9:
+- ld [%o1 + 12], %g1
+- sll %o4, %g4, %g2
+- srl %o5, %g7, %g5
++ ld [%i1 + 12], %g1
++ sll %i4, %g4, %g2
++ srl %i5, %l0, %g5
+ addcc %g3, -4, %g3
+ or %g2, %g5, %g2
+- add %o1, 16, %o1
+- st %g2, [%o0 + 12]
+- add %o0, 16, %o0
++ add %i1, 16, %i1
++ st %g2, [%i0 + 12]
++ add %i0, 16, %i0
+ bne,a 5b
+- ld [%o1], %o3
++ ld [%i1], %i3
+ 10:
+- sll %o5, %g4, %g2
+- srl %g1, %g7, %g5
+- srl %g7, 3, %g3
++ sll %i5, %g4, %g2
++ srl %g1, %l0, %g5
++ srl %l0, 3, %g3
+ or %g2, %g5, %g2
+- sub %o1, %g3, %o1
+- andcc %o2, 2, %g0
+- st %g2, [%o0]
++ sub %i1, %g3, %i1
++ andcc %i2, 2, %g0
++ st %g2, [%i0]
+ be 1f
+- andcc %o2, 1, %g0
+-
+- ldub [%o1], %g2
+- add %o1, 2, %o1
+- stb %g2, [%o0 + 4]
+- add %o0, 2, %o0
+- ldub [%o1 - 1], %g2
+- stb %g2, [%o0 + 3]
++ andcc %i2, 1, %g0
++
++ ldub [%i1], %g2
++ add %i1, 2, %i1
++ stb %g2, [%i0 + 4]
++ add %i0, 2, %i0
++ ldub [%i1 - 1], %g2
++ stb %g2, [%i0 + 3]
+ 1:
+ be 1f
+ nop
+- ldub [%o1], %g2
+- stb %g2, [%o0 + 4]
+-1:
+- retl
+- RETL_INSN
+-
+-#ifdef FASTER_NONALIGNED
+-
+-87: /* faster_nonaligned */
+-
+- andcc %o1, 3, %g0
+- be 3f
+- andcc %o1, 1, %g0
+-
+- be 4f
+- andcc %o1, 2, %g0
+-
+- ldub [%o1], %g2
+- add %o1, 1, %o1
+- stb %g2, [%o0]
+- sub %o2, 1, %o2
+- bne 3f
+- add %o0, 1, %o0
+-4:
+- lduh [%o1], %g2
+- add %o1, 2, %o1
+- srl %g2, 8, %g3
+- sub %o2, 2, %o2
+- stb %g3, [%o0]
+- add %o0, 2, %o0
+- stb %g2, [%o0 - 1]
+-3:
+- andcc %o1, 4, %g0
+-
+- bne 2f
+- cmp %o5, 1
+-
+- ld [%o1], %o4
+- srl %o4, 24, %g2
+- stb %g2, [%o0]
+- srl %o4, 16, %g3
+- stb %g3, [%o0 + 1]
+- srl %o4, 8, %g2
+- stb %g2, [%o0 + 2]
+- sub %o2, 4, %o2
+- stb %o4, [%o0 + 3]
+- add %o1, 4, %o1
+- add %o0, 4, %o0
+-2:
+- be 33f
+- cmp %o5, 2
+- be 32f
+- sub %o2, 4, %o2
+-31:
+- ld [%o1], %g2
+- add %o1, 4, %o1
+- srl %g2, 24, %g3
+- and %o0, 7, %g5
+- stb %g3, [%o0]
+- cmp %g5, 7
+- sll %g2, 8, %g1
+- add %o0, 4, %o0
+- be 41f
+- and %o2, 0xffffffc0, %o3
+- ld [%o0 - 7], %o4
+-4:
+- SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
+- SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
+- SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
+- SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
+- subcc %o3, 64, %o3
+- add %o1, 64, %o1
+- bne 4b
+- add %o0, 64, %o0
+-
+- andcc %o2, 0x30, %o3
+- be,a 1f
+- srl %g1, 16, %g2
+-4:
+- SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
+- subcc %o3, 16, %o3
+- add %o1, 16, %o1
+- bne 4b
+- add %o0, 16, %o0
+-
+- srl %g1, 16, %g2
+-1:
+- st %o4, [%o0 - 7]
+- sth %g2, [%o0 - 3]
+- srl %g1, 8, %g4
+- b 88f
+- stb %g4, [%o0 - 1]
+-32:
+- ld [%o1], %g2
+- add %o1, 4, %o1
+- srl %g2, 16, %g3
+- and %o0, 7, %g5
+- sth %g3, [%o0]
+- cmp %g5, 6
+- sll %g2, 16, %g1
+- add %o0, 4, %o0
+- be 42f
+- and %o2, 0xffffffc0, %o3
+- ld [%o0 - 6], %o4
+-4:
+- SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
+- SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
+- SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
+- SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
+- subcc %o3, 64, %o3
+- add %o1, 64, %o1
+- bne 4b
+- add %o0, 64, %o0
+-
+- andcc %o2, 0x30, %o3
+- be,a 1f
+- srl %g1, 16, %g2
+-4:
+- SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
+- subcc %o3, 16, %o3
+- add %o1, 16, %o1
+- bne 4b
+- add %o0, 16, %o0
+-
+- srl %g1, 16, %g2
+-1:
+- st %o4, [%o0 - 6]
+- b 88f
+- sth %g2, [%o0 - 2]
+-33:
+- ld [%o1], %g2
+- sub %o2, 4, %o2
+- srl %g2, 24, %g3
+- and %o0, 7, %g5
+- stb %g3, [%o0]
+- cmp %g5, 5
+- srl %g2, 8, %g4
+- sll %g2, 24, %g1
+- sth %g4, [%o0 + 1]
+- add %o1, 4, %o1
+- be 43f
+- and %o2, 0xffffffc0, %o3
+-
+- ld [%o0 - 1], %o4
+- add %o0, 4, %o0
+-4:
+- SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
+- SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
+- SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
+- SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
+- subcc %o3, 64, %o3
+- add %o1, 64, %o1
+- bne 4b
+- add %o0, 64, %o0
+-
+- andcc %o2, 0x30, %o3
+- be,a 1f
+- srl %g1, 24, %g2
+-4:
+- SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
+- subcc %o3, 16, %o3
+- add %o1, 16, %o1
+- bne 4b
+- add %o0, 16, %o0
+-
+- srl %g1, 24, %g2
+-1:
+- st %o4, [%o0 - 5]
+- b 88f
+- stb %g2, [%o0 - 1]
+-41:
+- SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
+- SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
+- SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
+- SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
+- subcc %o3, 64, %o3
+- add %o1, 64, %o1
+- bne 41b
+- add %o0, 64, %o0
+-
+- andcc %o2, 0x30, %o3
+- be,a 1f
+- srl %g1, 16, %g2
+-4:
+- SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
+- subcc %o3, 16, %o3
+- add %o1, 16, %o1
+- bne 4b
+- add %o0, 16, %o0
+-
+- srl %g1, 16, %g2
++ ldub [%i1], %g2
++ stb %g2, [%i0 + 4]
+ 1:
+- sth %g2, [%o0 - 3]
+- srl %g1, 8, %g4
+- b 88f
+- stb %g4, [%o0 - 1]
+-43:
+- SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
+- SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
+- SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
+- SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
+- subcc %o3, 64, %o3
+- add %o1, 64, %o1
+- bne 43b
+- add %o0, 64, %o0
+-
+- andcc %o2, 0x30, %o3
+- be,a 1f
+- srl %g1, 24, %g2
+-4:
+- SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
+- subcc %o3, 16, %o3
+- add %o1, 16, %o1
+- bne 4b
+- add %o0, 16, %o0
+-
+- srl %g1, 24, %g2
+-1:
+- stb %g2, [%o0 + 3]
+- b 88f
+- add %o0, 4, %o0
+-42:
+- SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
+- SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
+- SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
+- SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
+- subcc %o3, 64, %o3
+- add %o1, 64, %o1
+- bne 42b
+- add %o0, 64, %o0
+-
+- andcc %o2, 0x30, %o3
+- be,a 1f
+- srl %g1, 16, %g2
+-4:
+- SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
+- subcc %o3, 16, %o3
+- add %o1, 16, %o1
+- bne 4b
+- add %o0, 16, %o0
+-
+- srl %g1, 16, %g2
+-1:
+- sth %g2, [%o0 - 2]
+-
+- /* Fall through */
+-
+-#endif /* FASTER_NONALIGNED */
++ ret
++ restore %g7, %g0, %o0
+
+ 88: /* short_end */
+
+@@ -1127,7 +521,7 @@ FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
+ stb %g2, [%o0]
+ 1:
+ retl
+- RETL_INSN
++ mov %g7, %o0
+
+ 90: /* short_aligned_end */
+ bne 88b
+diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
+index e3cda21..301421c 100644
+--- a/arch/sparc/mm/Makefile
++++ b/arch/sparc/mm/Makefile
+@@ -8,7 +8,6 @@ obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
+ obj-y += fault_$(BITS).o
+ obj-y += init_$(BITS).o
+ obj-$(CONFIG_SPARC32) += loadmmu.o
+-obj-y += generic_$(BITS).o
+ obj-$(CONFIG_SPARC32) += extable.o btfixup.o srmmu.o iommu.o io-unit.o
+ obj-$(CONFIG_SPARC32) += hypersparc.o viking.o tsunami.o swift.o
+ obj-$(CONFIG_SPARC_LEON)+= leon_mm.o
+diff --git a/arch/sparc/mm/btfixup.c b/arch/sparc/mm/btfixup.c
+index 5175ac2..8a7f817 100644
+--- a/arch/sparc/mm/btfixup.c
++++ b/arch/sparc/mm/btfixup.c
+@@ -302,8 +302,7 @@ void __init btfixup(void)
+ case 'i': /* INT */
+ if ((insn & 0xc1c00000) == 0x01000000) /* %HI */
+ set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
+- else if ((insn & 0x80002000) == 0x80002000 &&
+- (insn & 0x01800000) != 0x01800000) /* %LO */
++ else if ((insn & 0x80002000) == 0x80002000) /* %LO */
+ set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff));
+ else {
+ prom_printf(insn_i, p, addr, insn);
+diff --git a/arch/sparc/mm/generic_32.c b/arch/sparc/mm/generic_32.c
+deleted file mode 100644
+index e6067b7..0000000
+--- a/arch/sparc/mm/generic_32.c
++++ /dev/null
+@@ -1,98 +0,0 @@
+-/*
+- * generic.c: Generic Sparc mm routines that are not dependent upon
+- * MMU type but are Sparc specific.
+- *
+- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+- */
+-
+-#include <linux/kernel.h>
+-#include <linux/mm.h>
+-#include <linux/swap.h>
+-#include <linux/pagemap.h>
+-
+-#include <asm/pgalloc.h>
+-#include <asm/pgtable.h>
+-#include <asm/page.h>
+-#include <asm/cacheflush.h>
+-#include <asm/tlbflush.h>
+-
+-/* Remap IO memory, the same way as remap_pfn_range(), but use
+- * the obio memory space.
+- *
+- * They use a pgprot that sets PAGE_IO and does not check the
+- * mem_map table as this is independent of normal memory.
+- */
+-static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte, unsigned long address, unsigned long size,
+- unsigned long offset, pgprot_t prot, int space)
+-{
+- unsigned long end;
+-
+- address &= ~PMD_MASK;
+- end = address + size;
+- if (end > PMD_SIZE)
+- end = PMD_SIZE;
+- do {
+- set_pte_at(mm, address, pte, mk_pte_io(offset, prot, space));
+- address += PAGE_SIZE;
+- offset += PAGE_SIZE;
+- pte++;
+- } while (address < end);
+-}
+-
+-static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
+- unsigned long offset, pgprot_t prot, int space)
+-{
+- unsigned long end;
+-
+- address &= ~PGDIR_MASK;
+- end = address + size;
+- if (end > PGDIR_SIZE)
+- end = PGDIR_SIZE;
+- offset -= address;
+- do {
+- pte_t *pte = pte_alloc_map(mm, NULL, pmd, address);
+- if (!pte)
+- return -ENOMEM;
+- io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
+- address = (address + PMD_SIZE) & PMD_MASK;
+- pmd++;
+- } while (address < end);
+- return 0;
+-}
+-
+-int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
+- unsigned long pfn, unsigned long size, pgprot_t prot)
+-{
+- int error = 0;
+- pgd_t * dir;
+- unsigned long beg = from;
+- unsigned long end = from + size;
+- struct mm_struct *mm = vma->vm_mm;
+- int space = GET_IOSPACE(pfn);
+- unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
+-
+- /* See comment in mm/memory.c remap_pfn_range */
+- vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
+- vma->vm_pgoff = (offset >> PAGE_SHIFT) |
+- ((unsigned long)space << 28UL);
+-
+- offset -= from;
+- dir = pgd_offset(mm, from);
+- flush_cache_range(vma, beg, end);
+-
+- while (from < end) {
+- pmd_t *pmd = pmd_alloc(mm, dir, from);
+- error = -ENOMEM;
+- if (!pmd)
+- break;
+- error = io_remap_pmd_range(mm, pmd, from, end - from, offset + from, prot, space);
+- if (error)
+- break;
+- from = (from + PGDIR_SIZE) & PGDIR_MASK;
+- dir++;
+- }
+-
+- flush_tlb_range(vma, beg, end);
+- return error;
+-}
+-EXPORT_SYMBOL(io_remap_pfn_range);
+diff --git a/arch/sparc/mm/generic_64.c b/arch/sparc/mm/generic_64.c
+deleted file mode 100644
+index 3cb00df..0000000
+--- a/arch/sparc/mm/generic_64.c
++++ /dev/null
+@@ -1,164 +0,0 @@
+-/*
+- * generic.c: Generic Sparc mm routines that are not dependent upon
+- * MMU type but are Sparc specific.
+- *
+- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+- */
+-
+-#include <linux/kernel.h>
+-#include <linux/mm.h>
+-#include <linux/swap.h>
+-#include <linux/pagemap.h>
+-
+-#include <asm/pgalloc.h>
+-#include <asm/pgtable.h>
+-#include <asm/page.h>
+-#include <asm/tlbflush.h>
+-
+-/* Remap IO memory, the same way as remap_pfn_range(), but use
+- * the obio memory space.
+- *
+- * They use a pgprot that sets PAGE_IO and does not check the
+- * mem_map table as this is independent of normal memory.
+- */
+-static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
+- unsigned long address,
+- unsigned long size,
+- unsigned long offset, pgprot_t prot,
+- int space)
+-{
+- unsigned long end;
+-
+- /* clear hack bit that was used as a write_combine side-effect flag */
+- offset &= ~0x1UL;
+- address &= ~PMD_MASK;
+- end = address + size;
+- if (end > PMD_SIZE)
+- end = PMD_SIZE;
+- do {
+- pte_t entry;
+- unsigned long curend = address + PAGE_SIZE;
+-
+- entry = mk_pte_io(offset, prot, space, PAGE_SIZE);
+- if (!(address & 0xffff)) {
+- if (PAGE_SIZE < (4 * 1024 * 1024) &&
+- !(address & 0x3fffff) &&
+- !(offset & 0x3ffffe) &&
+- end >= address + 0x400000) {
+- entry = mk_pte_io(offset, prot, space,
+- 4 * 1024 * 1024);
+- curend = address + 0x400000;
+- offset += 0x400000;
+- } else if (PAGE_SIZE < (512 * 1024) &&
+- !(address & 0x7ffff) &&
+- !(offset & 0x7fffe) &&
+- end >= address + 0x80000) {
+- entry = mk_pte_io(offset, prot, space,
+- 512 * 1024 * 1024);
+- curend = address + 0x80000;
+- offset += 0x80000;
+- } else if (PAGE_SIZE < (64 * 1024) &&
+- !(offset & 0xfffe) &&
+- end >= address + 0x10000) {
+- entry = mk_pte_io(offset, prot, space,
+- 64 * 1024);
+- curend = address + 0x10000;
+- offset += 0x10000;
+- } else
+- offset += PAGE_SIZE;
+- } else
+- offset += PAGE_SIZE;
+-
+- if (pte_write(entry))
+- entry = pte_mkdirty(entry);
+- do {
+- BUG_ON(!pte_none(*pte));
+- set_pte_at(mm, address, pte, entry);
+- address += PAGE_SIZE;
+- pte_val(entry) += PAGE_SIZE;
+- pte++;
+- } while (address < curend);
+- } while (address < end);
+-}
+-
+-static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
+- unsigned long offset, pgprot_t prot, int space)
+-{
+- unsigned long end;
+-
+- address &= ~PGDIR_MASK;
+- end = address + size;
+- if (end > PGDIR_SIZE)
+- end = PGDIR_SIZE;
+- offset -= address;
+- do {
+- pte_t *pte = pte_alloc_map(mm, NULL, pmd, address);
+- if (!pte)
+- return -ENOMEM;
+- io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
+- pte_unmap(pte);
+- address = (address + PMD_SIZE) & PMD_MASK;
+- pmd++;
+- } while (address < end);
+- return 0;
+-}
+-
+-static inline int io_remap_pud_range(struct mm_struct *mm, pud_t * pud, unsigned long address, unsigned long size,
+- unsigned long offset, pgprot_t prot, int space)
+-{
+- unsigned long end;
+-
+- address &= ~PUD_MASK;
+- end = address + size;
+- if (end > PUD_SIZE)
+- end = PUD_SIZE;
+- offset -= address;
+- do {
+- pmd_t *pmd = pmd_alloc(mm, pud, address);
+- if (!pud)
+- return -ENOMEM;
+- io_remap_pmd_range(mm, pmd, address, end - address, address + offset, prot, space);
+- address = (address + PUD_SIZE) & PUD_MASK;
+- pud++;
+- } while (address < end);
+- return 0;
+-}
+-
+-int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
+- unsigned long pfn, unsigned long size, pgprot_t prot)
+-{
+- int error = 0;
+- pgd_t * dir;
+- unsigned long beg = from;
+- unsigned long end = from + size;
+- struct mm_struct *mm = vma->vm_mm;
+- int space = GET_IOSPACE(pfn);
+- unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
+- unsigned long phys_base;
+-
+- phys_base = offset | (((unsigned long) space) << 32UL);
+-
+- /* See comment in mm/memory.c remap_pfn_range */
+- vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
+- vma->vm_pgoff = phys_base >> PAGE_SHIFT;
+-
+- offset -= from;
+- dir = pgd_offset(mm, from);
+- flush_cache_range(vma, beg, end);
+-
+- while (from < end) {
+- pud_t *pud = pud_alloc(mm, dir, from);
+- error = -ENOMEM;
+- if (!pud)
+- break;
+- error = io_remap_pud_range(mm, pud, from, end - from, offset + from, prot, space);
+- if (error)
+- break;
+- from = (from + PGDIR_SIZE) & PGDIR_MASK;
+- dir++;
+- }
+-
+- flush_tlb_range(vma, beg, end);
+- return error;
+-}
+-EXPORT_SYMBOL(io_remap_pfn_range);
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index bfab3fa..7b65f75 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -568,8 +568,8 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
+ break;
+ }
+ if (filter[i].jt != 0) {
+- if (filter[i].jf)
+- t_offset += is_near(f_offset) ? 2 : 6;
++ if (filter[i].jf && f_offset)
++ t_offset += is_near(f_offset) ? 2 : 5;
+ EMIT_COND_JMP(t_op, t_offset);
+ if (filter[i].jf)
+ EMIT_JMP(f_offset);
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 795154e..8fc4ae2 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -418,6 +418,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
+ q->backing_dev_info.state = 0;
+ q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
+ q->backing_dev_info.name = "block";
++ q->node = node_id;
+
+ err = bdi_init(&q->backing_dev_info);
+ if (err) {
+@@ -502,7 +503,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
+ if (!uninit_q)
+ return NULL;
+
+- q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id);
++ q = blk_init_allocated_queue(uninit_q, rfn, lock);
+ if (!q)
+ blk_cleanup_queue(uninit_q);
+
+@@ -514,18 +515,9 @@ struct request_queue *
+ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
+ spinlock_t *lock)
+ {
+- return blk_init_allocated_queue_node(q, rfn, lock, -1);
+-}
+-EXPORT_SYMBOL(blk_init_allocated_queue);
+-
+-struct request_queue *
+-blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
+- spinlock_t *lock, int node_id)
+-{
+ if (!q)
+ return NULL;
+
+- q->node = node_id;
+ if (blk_init_free_list(q))
+ return NULL;
+
+@@ -555,7 +547,7 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
+
+ return NULL;
+ }
+-EXPORT_SYMBOL(blk_init_allocated_queue_node);
++EXPORT_SYMBOL(blk_init_allocated_queue);
+
+ int blk_get_queue(struct request_queue *q)
+ {
+diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
+index 16ace89..4c12869 100644
+--- a/block/cfq-iosched.c
++++ b/block/cfq-iosched.c
+@@ -3184,7 +3184,7 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
+ }
+ }
+
+- if (ret)
++ if (ret && ret != -EEXIST)
+ printk(KERN_ERR "cfq: cic link failed!\n");
+
+ return ret;
+@@ -3200,6 +3200,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
+ {
+ struct io_context *ioc = NULL;
+ struct cfq_io_context *cic;
++ int ret;
+
+ might_sleep_if(gfp_mask & __GFP_WAIT);
+
+@@ -3207,6 +3208,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
+ if (!ioc)
+ return NULL;
+
++retry:
+ cic = cfq_cic_lookup(cfqd, ioc);
+ if (cic)
+ goto out;
+@@ -3215,7 +3217,12 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
+ if (cic == NULL)
+ goto err;
+
+- if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
++ ret = cfq_cic_link(cfqd, ioc, cic, gfp_mask);
++ if (ret == -EEXIST) {
++ /* someone has linked cic to ioc already */
++ cfq_cic_free(cic);
++ goto retry;
++ } else if (ret)
+ goto err_free;
+
+ out:
+@@ -4036,6 +4043,11 @@ static void *cfq_init_queue(struct request_queue *q)
+
+ if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
+ kfree(cfqg);
++
++ spin_lock(&cic_index_lock);
++ ida_remove(&cic_index_ida, cfqd->cic_index);
++ spin_unlock(&cic_index_lock);
++
+ kfree(cfqd);
+ return NULL;
+ }
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index 8a3942c..c72b590 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -1453,6 +1453,14 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+
+ diff1 = now - dev_priv->last_time1;
+
++ /* Prevent division-by-zero if we are asking too fast.
++ * Also, we don't get interesting results if we are polling
++ * faster than once in 10ms, so just return the saved value
++ * in such cases.
++ */
++ if (diff1 <= 10)
++ return dev_priv->chipset_power;
++
+ count1 = I915_READ(DMIEC);
+ count2 = I915_READ(DDREC);
+ count3 = I915_READ(CSIEC);
+@@ -1483,6 +1491,8 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+ dev_priv->last_count1 = total_count;
+ dev_priv->last_time1 = now;
+
++ dev_priv->chipset_power = ret;
++
+ return ret;
+ }
+
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 7916bd9..1a2a2d1 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -707,6 +707,7 @@ typedef struct drm_i915_private {
+
+ u64 last_count1;
+ unsigned long last_time1;
++ unsigned long chipset_power;
+ u64 last_count2;
+ struct timespec last_time2;
+ unsigned long gfx_power;
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index ad381a2..2ae29de 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -3271,10 +3271,10 @@
+ /* or SDVOB */
+ #define HDMIB 0xe1140
+ #define PORT_ENABLE (1 << 31)
+-#define TRANSCODER_A (0)
+-#define TRANSCODER_B (1 << 30)
+-#define TRANSCODER(pipe) ((pipe) << 30)
+-#define TRANSCODER_MASK (1 << 30)
++#define TRANSCODER(pipe) ((pipe) << 30)
++#define TRANSCODER_CPT(pipe) ((pipe) << 29)
++#define TRANSCODER_MASK (1 << 30)
++#define TRANSCODER_MASK_CPT (3 << 29)
+ #define COLOR_FORMAT_8bpc (0)
+ #define COLOR_FORMAT_12bpc (3 << 26)
+ #define SDVOB_HOTPLUG_ENABLE (1 << 23)
+diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
+index 6348c49..ac0c323 100644
+--- a/drivers/gpu/drm/i915/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/intel_sdvo.c
+@@ -1085,8 +1085,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
+ }
+ sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
+ }
+- if (intel_crtc->pipe == 1)
+- sdvox |= SDVO_PIPE_B_SELECT;
++
++ if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
++ sdvox |= TRANSCODER_CPT(intel_crtc->pipe);
++ else
++ sdvox |= TRANSCODER(intel_crtc->pipe);
++
+ if (intel_sdvo->has_hdmi_audio)
+ sdvox |= SDVO_AUDIO_ENABLE;
+
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index d4ee6f0..c3f0d42 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -3258,6 +3258,18 @@ int evergreen_init(struct radeon_device *rdev)
+ rdev->accel_working = false;
+ }
+ }
++
++ /* Don't start up if the MC ucode is missing on BTC parts.
++ * The default clocks and voltages before the MC ucode
++ * is loaded are not sufficient for advanced operations.
++ */
++ if (ASIC_IS_DCE5(rdev)) {
++ if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
++ DRM_ERROR("radeon: MC ucode required for NI+.\n");
++ return -EINVAL;
++ }
++ }
++
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index 285acc4..a098edc 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -2568,7 +2568,11 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
+
+ rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.current_clock_mode_index = 0;
+- rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
++ if (rdev->pm.default_power_state_index >= 0)
++ rdev->pm.current_vddc =
++ rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
++ else
++ rdev->pm.current_vddc = 0;
+ }
+
+ void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
+diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
+index fa643f4..059a865 100644
+--- a/drivers/infiniband/hw/mlx4/main.c
++++ b/drivers/infiniband/hw/mlx4/main.c
+@@ -1144,7 +1144,8 @@ err_reg:
+
+ err_counter:
+ for (; i; --i)
+- mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
++ if (ibdev->counters[i - 1] != -1)
++ mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
+
+ err_map:
+ iounmap(ibdev->uar_map);
+@@ -1175,7 +1176,8 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
+ }
+ iounmap(ibdev->uar_map);
+ for (p = 0; p < ibdev->num_ports; ++p)
+- mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
++ if (ibdev->counters[p] != -1)
++ mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
+ mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
+ mlx4_CLOSE_PORT(dev, p);
+
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 5538fc6..7675363 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -24,6 +24,7 @@
+ */
+
+ #include <linux/module.h>
++#include <linux/delay.h>
+ #include <linux/dmi.h>
+ #include <linux/input/mt.h>
+ #include <linux/serio.h>
+@@ -786,6 +787,16 @@ static int synaptics_reconnect(struct psmouse *psmouse)
+
+ do {
+ psmouse_reset(psmouse);
++ if (retry) {
++ /*
++ * On some boxes, right after resuming, the touchpad
++ * needs some time to finish initializing (I assume
++ * it needs time to calibrate) and start responding
++ * to Synaptics-specific queries, so let's wait a
++ * bit.
++ */
++ ssleep(1);
++ }
+ error = synaptics_detect(psmouse, 0);
+ } while (error && ++retry < 3);
+
+diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
+index b3a5ecd..3422da0 100644
+--- a/drivers/media/video/omap/omap_vout.c
++++ b/drivers/media/video/omap/omap_vout.c
+@@ -38,6 +38,7 @@
+ #include <linux/irq.h>
+ #include <linux/videodev2.h>
+ #include <linux/dma-mapping.h>
++#include <linux/slab.h>
+
+ #include <media/videobuf-dma-contig.h>
+ #include <media/v4l2-device.h>
+diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
+index aa55066..b062b1a 100644
+--- a/drivers/media/video/s5p-fimc/fimc-core.c
++++ b/drivers/media/video/s5p-fimc/fimc-core.c
+@@ -35,7 +35,7 @@ static char *fimc_clocks[MAX_FIMC_CLOCKS] = {
+ static struct fimc_fmt fimc_formats[] = {
+ {
+ .name = "RGB565",
+- .fourcc = V4L2_PIX_FMT_RGB565X,
++ .fourcc = V4L2_PIX_FMT_RGB565,
+ .depth = { 16 },
+ .color = S5P_FIMC_RGB565,
+ .memplanes = 1,
+diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
+index b8eef46..35cdc80 100644
+--- a/drivers/mfd/twl-core.c
++++ b/drivers/mfd/twl-core.c
+@@ -362,13 +362,13 @@ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
+ pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
+ return -EPERM;
+ }
+- sid = twl_map[mod_no].sid;
+- twl = &twl_modules[sid];
+-
+ if (unlikely(!inuse)) {
+- pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
++ pr_err("%s: not initialized\n", DRIVER_NAME);
+ return -EPERM;
+ }
++ sid = twl_map[mod_no].sid;
++ twl = &twl_modules[sid];
++
+ mutex_lock(&twl->xfer_lock);
+ /*
+ * [MSG1]: fill the register address data
+@@ -419,13 +419,13 @@ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
+ pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
+ return -EPERM;
+ }
+- sid = twl_map[mod_no].sid;
+- twl = &twl_modules[sid];
+-
+ if (unlikely(!inuse)) {
+- pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
++ pr_err("%s: not initialized\n", DRIVER_NAME);
+ return -EPERM;
+ }
++ sid = twl_map[mod_no].sid;
++ twl = &twl_modules[sid];
++
+ mutex_lock(&twl->xfer_lock);
+ /* [MSG1] fill the register address data */
+ msg = &twl->xfer_msg[0];
+diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c
+index 7cbf2aa..834f824 100644
+--- a/drivers/mfd/twl4030-madc.c
++++ b/drivers/mfd/twl4030-madc.c
+@@ -740,6 +740,28 @@ static int __devinit twl4030_madc_probe(struct platform_device *pdev)
+ TWL4030_BCI_BCICTL1);
+ goto err_i2c;
+ }
++
++ /* Check that MADC clock is on */
++ ret = twl_i2c_read_u8(TWL4030_MODULE_INTBR, &regval, TWL4030_REG_GPBR1);
++ if (ret) {
++ dev_err(&pdev->dev, "unable to read reg GPBR1 0x%X\n",
++ TWL4030_REG_GPBR1);
++ goto err_i2c;
++ }
++
++ /* If MADC clk is not on, turn it on */
++ if (!(regval & TWL4030_GPBR1_MADC_HFCLK_EN)) {
++ dev_info(&pdev->dev, "clk disabled, enabling\n");
++ regval |= TWL4030_GPBR1_MADC_HFCLK_EN;
++ ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, regval,
++ TWL4030_REG_GPBR1);
++ if (ret) {
++ dev_err(&pdev->dev, "unable to write reg GPBR1 0x%X\n",
++ TWL4030_REG_GPBR1);
++ goto err_i2c;
++ }
++ }
++
+ platform_set_drvdata(pdev, madc);
+ mutex_init(&madc->lock);
+ ret = request_threaded_irq(platform_get_irq(pdev, 0), NULL,
+diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
+index 56e9a41..d8eac24 100644
+--- a/drivers/mmc/host/mmci.c
++++ b/drivers/mmc/host/mmci.c
+@@ -673,7 +673,8 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
+ unsigned int status)
+ {
+ /* First check for errors */
+- if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
++ if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
++ MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+ u32 remain, success;
+
+ /* Terminate the DMA transfer */
+@@ -752,8 +753,12 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
+ }
+
+ if (!cmd->data || cmd->error) {
+- if (host->data)
++ if (host->data) {
++ /* Terminate the DMA transfer */
++ if (dma_inprogress(host))
++ mmci_dma_data_error(host);
+ mmci_stop_data(host);
++ }
+ mmci_request_end(host, cmd->mrq);
+ } else if (!(cmd->data->flags & MMC_DATA_READ)) {
+ mmci_start_data(host, cmd->data);
+@@ -953,8 +958,9 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
+ dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
+
+ data = host->data;
+- if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
+- MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
++ if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
++ MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
++ MCI_DATABLOCKEND) && data)
+ mmci_data_irq(host, data, status);
+
+ cmd = host->cmd;
+diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
+index e8f6e65..2ec978b 100644
+--- a/drivers/mmc/host/vub300.c
++++ b/drivers/mmc/host/vub300.c
+@@ -259,7 +259,7 @@ static int firmware_rom_wait_states = 0x04;
+ static int firmware_rom_wait_states = 0x1C;
+ #endif
+
+-module_param(firmware_rom_wait_states, bool, 0644);
++module_param(firmware_rom_wait_states, int, 0644);
+ MODULE_PARM_DESC(firmware_rom_wait_states,
+ "ROM wait states byte=RRRIIEEE (Reserved Internal External)");
+
+diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c
+index 89f829f..f8a6853 100644
+--- a/drivers/net/pptp.c
++++ b/drivers/net/pptp.c
+@@ -423,10 +423,8 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
+ lock_sock(sk);
+
+ opt->src_addr = sp->sa_addr.pptp;
+- if (add_chan(po)) {
+- release_sock(sk);
++ if (add_chan(po))
+ error = -EBUSY;
+- }
+
+ release_sock(sk);
+ return error;
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index 722967b..69736d8 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -1841,6 +1841,9 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw,
+ struct ath_softc *sc = hw->priv;
+ struct ath_node *an = (struct ath_node *) sta->drv_priv;
+
++ if (!(sc->sc_flags & SC_OP_TXAGGR))
++ return;
++
+ switch (cmd) {
+ case STA_NOTIFY_SLEEP:
+ an->sleeping = true;
+diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
+index c04a6c3..297d762 100644
+--- a/drivers/net/wireless/ath/ath9k/rc.c
++++ b/drivers/net/wireless/ath/ath9k/rc.c
+@@ -1250,7 +1250,9 @@ static void ath_rc_init(struct ath_softc *sc,
+
+ ath_rc_priv->max_valid_rate = k;
+ ath_rc_sort_validrates(rate_table, ath_rc_priv);
+- ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4];
++ ath_rc_priv->rate_max_phy = (k > 4) ?
++ ath_rc_priv->valid_rate_index[k-4] :
++ ath_rc_priv->valid_rate_index[k-1];
+ ath_rc_priv->rate_table = rate_table;
+
+ ath_dbg(common, ATH_DBG_CONFIG,
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+index eabbf1a..5493f94 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+@@ -620,8 +620,8 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
+ if (ctx->ht.enabled) {
+ /* if HT40 is used, it should not change
+ * after associated except channel switch */
+- if (iwl_is_associated_ctx(ctx) &&
+- !ctx->ht.is_40mhz)
++ if (!ctx->ht.is_40mhz ||
++ !iwl_is_associated_ctx(ctx))
+ iwlagn_config_ht40(conf, ctx);
+ } else
+ ctx->ht.is_40mhz = false;
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+index 53bb59e..475f9d4 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+@@ -166,7 +166,10 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
+ tx_cmd->tid_tspec = qc[0] & 0xf;
+ tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
+ } else {
+- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
++ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
++ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
++ else
++ tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
+ }
+
+ iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);
+diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.c b/drivers/net/wireless/iwlwifi/iwl-trans.c
+index 41f0de9..32eb4fe 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-trans.c
++++ b/drivers/net/wireless/iwlwifi/iwl-trans.c
+@@ -1068,9 +1068,7 @@ static int iwl_trans_tx(struct iwl_priv *priv, struct sk_buff *skb,
+ iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
+
+ /* Set up entry for this TFD in Tx byte-count array */
+- if (ampdu)
+- iwl_trans_txq_update_byte_cnt_tbl(priv, txq,
+- le16_to_cpu(tx_cmd->len));
++ iwl_trans_txq_update_byte_cnt_tbl(priv, txq, le16_to_cpu(tx_cmd->len));
+
+ dma_sync_single_for_device(priv->bus->dev, txcmd_phys, firstlen,
+ DMA_BIDIRECTIONAL);
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
+index 592a10a..3b585aa 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
+@@ -569,7 +569,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ }
+ case ERFSLEEP:{
+ if (ppsc->rfpwr_state == ERFOFF)
+- break;
++ return false;
+ for (queue_id = 0, i = 0;
+ queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
+ ring = &pcipriv->dev.tx_ring[queue_id];
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
+index 7285290..e49cf22 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
+@@ -548,7 +548,7 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ break;
+ case ERFSLEEP:
+ if (ppsc->rfpwr_state == ERFOFF)
+- break;
++ return false;
+ for (queue_id = 0, i = 0;
+ queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
+ ring = &pcipriv->dev.tx_ring[queue_id];
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+index 3ac7af1..0883349 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+@@ -3374,7 +3374,7 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ break;
+ case ERFSLEEP:
+ if (ppsc->rfpwr_state == ERFOFF)
+- break;
++ return false;
+
+ for (queue_id = 0, i = 0;
+ queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
+index f27171a..f10ac1a 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
+@@ -602,7 +602,7 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ }
+ case ERFSLEEP:
+ if (ppsc->rfpwr_state == ERFOFF)
+- break;
++ return false;
+
+ for (queue_id = 0, i = 0;
+ queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
+diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
+index 89f6345..84a208d 100644
+--- a/drivers/oprofile/oprofile_files.c
++++ b/drivers/oprofile/oprofile_files.c
+@@ -45,7 +45,7 @@ static ssize_t timeout_write(struct file *file, char const __user *buf,
+ return -EINVAL;
+
+ retval = oprofilefs_ulong_from_user(&val, buf, count);
+- if (retval)
++ if (retval <= 0)
+ return retval;
+
+ retval = oprofile_set_timeout(val);
+@@ -84,7 +84,7 @@ static ssize_t depth_write(struct file *file, char const __user *buf, size_t cou
+ return -EINVAL;
+
+ retval = oprofilefs_ulong_from_user(&val, buf, count);
+- if (retval)
++ if (retval <= 0)
+ return retval;
+
+ retval = oprofile_set_ulong(&oprofile_backtrace_depth, val);
+@@ -141,9 +141,10 @@ static ssize_t enable_write(struct file *file, char const __user *buf, size_t co
+ return -EINVAL;
+
+ retval = oprofilefs_ulong_from_user(&val, buf, count);
+- if (retval)
++ if (retval <= 0)
+ return retval;
+
++ retval = 0;
+ if (val)
+ retval = oprofile_start();
+ else
+diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
+index e9ff6f7..1c0b799 100644
+--- a/drivers/oprofile/oprofilefs.c
++++ b/drivers/oprofile/oprofilefs.c
+@@ -60,6 +60,13 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user *buf, size_t cou
+ }
+
+
++/*
++ * Note: If oprofilefs_ulong_from_user() returns 0, then *val remains
++ * unchanged and might be uninitialized. This follows write syscall
++ * implementation when count is zero: "If count is zero ... [and if]
++ * no errors are detected, 0 will be returned without causing any
++ * other effect." (man 2 write)
++ */
+ int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_t count)
+ {
+ char tmpbuf[TMPBUFSIZE];
+@@ -79,7 +86,7 @@ int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_
+ spin_lock_irqsave(&oprofilefs_lock, flags);
+ *val = simple_strtoul(tmpbuf, NULL, 0);
+ spin_unlock_irqrestore(&oprofilefs_lock, flags);
+- return 0;
++ return count;
+ }
+
+
+@@ -99,7 +106,7 @@ static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_
+ return -EINVAL;
+
+ retval = oprofilefs_ulong_from_user(&value, buf, count);
+- if (retval)
++ if (retval <= 0)
+ return retval;
+
+ retval = oprofile_set_ulong(file->private_data, value);
+diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
+index 3d9d2b9..44e91e5 100644
+--- a/drivers/rtc/interface.c
++++ b/drivers/rtc/interface.c
+@@ -318,20 +318,6 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+ }
+ EXPORT_SYMBOL_GPL(rtc_read_alarm);
+
+-static int ___rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+-{
+- int err;
+-
+- if (!rtc->ops)
+- err = -ENODEV;
+- else if (!rtc->ops->set_alarm)
+- err = -EINVAL;
+- else
+- err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
+-
+- return err;
+-}
+-
+ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+ {
+ struct rtc_time tm;
+@@ -355,7 +341,14 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+ * over right here, before we set the alarm.
+ */
+
+- return ___rtc_set_alarm(rtc, alarm);
++ if (!rtc->ops)
++ err = -ENODEV;
++ else if (!rtc->ops->set_alarm)
++ err = -EINVAL;
++ else
++ err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
++
++ return err;
+ }
+
+ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+@@ -769,20 +762,6 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
+ return 0;
+ }
+
+-static void rtc_alarm_disable(struct rtc_device *rtc)
+-{
+- struct rtc_wkalrm alarm;
+- struct rtc_time tm;
+-
+- __rtc_read_time(rtc, &tm);
+-
+- alarm.time = rtc_ktime_to_tm(ktime_add(rtc_tm_to_ktime(tm),
+- ktime_set(300, 0)));
+- alarm.enabled = 0;
+-
+- ___rtc_set_alarm(rtc, &alarm);
+-}
+-
+ /**
+ * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
+ * @rtc rtc device
+@@ -804,10 +783,8 @@ static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
+ struct rtc_wkalrm alarm;
+ int err;
+ next = timerqueue_getnext(&rtc->timerqueue);
+- if (!next) {
+- rtc_alarm_disable(rtc);
++ if (!next)
+ return;
+- }
+ alarm.time = rtc_ktime_to_tm(next->expires);
+ alarm.enabled = 1;
+ err = __rtc_set_alarm(rtc, &alarm);
+@@ -869,8 +846,7 @@ again:
+ err = __rtc_set_alarm(rtc, &alarm);
+ if (err == -ETIME)
+ goto again;
+- } else
+- rtc_alarm_disable(rtc);
++ }
+
+ mutex_unlock(&rtc->ops_lock);
+ }
+diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
+index eda128f..64aedd8 100644
+--- a/drivers/rtc/rtc-m41t80.c
++++ b/drivers/rtc/rtc-m41t80.c
+@@ -357,10 +357,19 @@ static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t)
+ static struct rtc_class_ops m41t80_rtc_ops = {
+ .read_time = m41t80_rtc_read_time,
+ .set_time = m41t80_rtc_set_time,
++ /*
++ * XXX - m41t80 alarm functionality is reported broken.
++ * Until it is fixed, don't register alarm functions.
++ *
+ .read_alarm = m41t80_rtc_read_alarm,
+ .set_alarm = m41t80_rtc_set_alarm,
++ */
+ .proc = m41t80_rtc_proc,
++ /*
++ * See above comment on broken alarm
++ *
+ .alarm_irq_enable = m41t80_rtc_alarm_irq_enable,
++ */
+ };
+
+ #if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
+diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
+index 7cac873..169ba7b 100644
+--- a/drivers/s390/scsi/zfcp_scsi.c
++++ b/drivers/s390/scsi/zfcp_scsi.c
+@@ -57,6 +57,10 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
+ {
+ struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
++ /* if previous slave_alloc returned early, there is nothing to do */
++ if (!zfcp_sdev->port)
++ return;
++
+ zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
+ put_device(&zfcp_sdev->port->dev);
+ }
+diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
+index 5d0e9a2..8858170 100644
+--- a/drivers/scsi/fcoe/fcoe.c
++++ b/drivers/scsi/fcoe/fcoe.c
+@@ -1635,6 +1635,7 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
+ stats->InvalidCRCCount++;
+ if (stats->InvalidCRCCount < 5)
+ printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
++ put_cpu();
+ return -EINVAL;
+ }
+
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
+index 83035bd..39e81cd 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
+@@ -1082,41 +1082,6 @@ _base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev)
+ }
+
+ /**
+- * _base_save_msix_table - backup msix vector table
+- * @ioc: per adapter object
+- *
+- * This address an errata where diag reset clears out the table
+- */
+-static void
+-_base_save_msix_table(struct MPT2SAS_ADAPTER *ioc)
+-{
+- int i;
+-
+- if (!ioc->msix_enable || ioc->msix_table_backup == NULL)
+- return;
+-
+- for (i = 0; i < ioc->msix_vector_count; i++)
+- ioc->msix_table_backup[i] = ioc->msix_table[i];
+-}
+-
+-/**
+- * _base_restore_msix_table - this restores the msix vector table
+- * @ioc: per adapter object
+- *
+- */
+-static void
+-_base_restore_msix_table(struct MPT2SAS_ADAPTER *ioc)
+-{
+- int i;
+-
+- if (!ioc->msix_enable || ioc->msix_table_backup == NULL)
+- return;
+-
+- for (i = 0; i < ioc->msix_vector_count; i++)
+- ioc->msix_table[i] = ioc->msix_table_backup[i];
+-}
+-
+-/**
+ * _base_check_enable_msix - checks MSIX capabable.
+ * @ioc: per adapter object
+ *
+@@ -1128,7 +1093,7 @@ _base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
+ {
+ int base;
+ u16 message_control;
+- u32 msix_table_offset;
++
+
+ base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
+ if (!base) {
+@@ -1141,14 +1106,8 @@ _base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
+ pci_read_config_word(ioc->pdev, base + 2, &message_control);
+ ioc->msix_vector_count = (message_control & 0x3FF) + 1;
+
+- /* get msix table */
+- pci_read_config_dword(ioc->pdev, base + 4, &msix_table_offset);
+- msix_table_offset &= 0xFFFFFFF8;
+- ioc->msix_table = (u32 *)((void *)ioc->chip + msix_table_offset);
+-
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "msix is supported, "
+- "vector_count(%d), table_offset(0x%08x), table(%p)\n", ioc->name,
+- ioc->msix_vector_count, msix_table_offset, ioc->msix_table));
++ "vector_count(%d)\n", ioc->name, ioc->msix_vector_count));
+ return 0;
+ }
+
+@@ -1162,8 +1121,6 @@ _base_disable_msix(struct MPT2SAS_ADAPTER *ioc)
+ {
+ if (ioc->msix_enable) {
+ pci_disable_msix(ioc->pdev);
+- kfree(ioc->msix_table_backup);
+- ioc->msix_table_backup = NULL;
+ ioc->msix_enable = 0;
+ }
+ }
+@@ -1189,14 +1146,6 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
+ if (_base_check_enable_msix(ioc) != 0)
+ goto try_ioapic;
+
+- ioc->msix_table_backup = kcalloc(ioc->msix_vector_count,
+- sizeof(u32), GFP_KERNEL);
+- if (!ioc->msix_table_backup) {
+- dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation for "
+- "msix_table_backup failed!!!\n", ioc->name));
+- goto try_ioapic;
+- }
+-
+ memset(&entries, 0, sizeof(struct msix_entry));
+ r = pci_enable_msix(ioc->pdev, &entries, 1);
+ if (r) {
+@@ -3513,9 +3462,6 @@ _base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
+ u32 hcb_size;
+
+ printk(MPT2SAS_INFO_FMT "sending diag reset !!\n", ioc->name);
+-
+- _base_save_msix_table(ioc);
+-
+ drsprintk(ioc, printk(MPT2SAS_INFO_FMT "clear interrupts\n",
+ ioc->name));
+
+@@ -3611,7 +3557,6 @@ _base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
+ goto out;
+ }
+
+- _base_restore_msix_table(ioc);
+ printk(MPT2SAS_INFO_FMT "diag reset: SUCCESS\n", ioc->name);
+ return 0;
+
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
+index 8d5be21..7df640f 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
++++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
+@@ -636,8 +636,6 @@ enum mutex_type {
+ * @wait_for_port_enable_to_complete:
+ * @msix_enable: flag indicating msix is enabled
+ * @msix_vector_count: number msix vectors
+- * @msix_table: virt address to the msix table
+- * @msix_table_backup: backup msix table
+ * @scsi_io_cb_idx: shost generated commands
+ * @tm_cb_idx: task management commands
+ * @scsih_cb_idx: scsih internal commands
+@@ -779,8 +777,6 @@ struct MPT2SAS_ADAPTER {
+
+ u8 msix_enable;
+ u16 msix_vector_count;
+- u32 *msix_table;
+- u32 *msix_table_backup;
+ u32 ioc_reset_count;
+
+ /* internal commands, callback index */
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+index 97aac82..d3b3567 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+@@ -4210,7 +4210,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+ /* insert into event log */
+ sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
+ sizeof(Mpi2EventDataSasDeviceStatusChange_t);
+- event_reply = kzalloc(sz, GFP_KERNEL);
++ event_reply = kzalloc(sz, GFP_ATOMIC);
+ if (!event_reply) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
+index e6ac317..32c535f 100644
+--- a/drivers/ssb/driver_pcicore.c
++++ b/drivers/ssb/driver_pcicore.c
+@@ -516,10 +516,14 @@ static void ssb_pcicore_pcie_setup_workarounds(struct ssb_pcicore *pc)
+
+ static void __devinit ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
+ {
+- ssb_pcicore_fix_sprom_core_index(pc);
++ struct ssb_device *pdev = pc->dev;
++ struct ssb_bus *bus = pdev->bus;
++
++ if (bus->bustype == SSB_BUSTYPE_PCI)
++ ssb_pcicore_fix_sprom_core_index(pc);
+
+ /* Disable PCI interrupts. */
+- ssb_write32(pc->dev, SSB_INTVEC, 0);
++ ssb_write32(pdev, SSB_INTVEC, 0);
+
+ /* Additional PCIe always once-executed workarounds */
+ if (pc->dev->id.coreid == SSB_DEV_PCIE) {
+diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
+index 809cbda..1795977 100644
+--- a/drivers/watchdog/hpwdt.c
++++ b/drivers/watchdog/hpwdt.c
+@@ -230,6 +230,7 @@ static int __devinit cru_detect(unsigned long map_entry,
+
+ cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE;
+
++ set_memory_x((unsigned long)bios32_entrypoint, (2 * PAGE_SIZE));
+ asminline_call(&cmn_regs, bios32_entrypoint);
+
+ if (cmn_regs.u1.ral != 0) {
+@@ -247,8 +248,10 @@ static int __devinit cru_detect(unsigned long map_entry,
+ if ((physical_bios_base + physical_bios_offset)) {
+ cru_rom_addr =
+ ioremap(cru_physical_address, cru_length);
+- if (cru_rom_addr)
++ if (cru_rom_addr) {
++ set_memory_x((unsigned long)cru_rom_addr, cru_length);
+ retval = 0;
++ }
+ }
+
+ printk(KERN_DEBUG "hpwdt: CRU Base Address: 0x%lx\n",
+diff --git a/fs/nfs/file.c b/fs/nfs/file.c
+index 5b3d984..babaf3a 100644
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -191,7 +191,7 @@ static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
+ * origin == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
+ * the cached file length
+ */
+- if (origin != SEEK_SET || origin != SEEK_CUR) {
++ if (origin != SEEK_SET && origin != SEEK_CUR) {
+ struct inode *inode = filp->f_mapping->host;
+
+ int retval = nfs_revalidate_file_size(inode, filp);
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 39914be..efd8431 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1525,16 +1525,16 @@ void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
+ {
+ if (!flags)
+ return;
+- else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
++ if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
+ nfs41_handle_server_reboot(clp);
+- else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
++ if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
+ SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
+ SEQ4_STATUS_ADMIN_STATE_REVOKED |
+ SEQ4_STATUS_LEASE_MOVED))
+ nfs41_handle_state_revoked(clp);
+- else if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
++ if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
+ nfs41_handle_recallable_state_revoked(clp);
+- else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
++ if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
+ SEQ4_STATUS_BACKCHANNEL_FAULT |
+ SEQ4_STATUS_CB_PATH_DOWN_SESSION))
+ nfs41_handle_cb_path_down(clp);
+diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
+index 41d6743..3e65427 100644
+--- a/fs/nilfs2/ioctl.c
++++ b/fs/nilfs2/ioctl.c
+@@ -842,6 +842,19 @@ long nilfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ case FS_IOC32_GETVERSION:
+ cmd = FS_IOC_GETVERSION;
+ break;
++ case NILFS_IOCTL_CHANGE_CPMODE:
++ case NILFS_IOCTL_DELETE_CHECKPOINT:
++ case NILFS_IOCTL_GET_CPINFO:
++ case NILFS_IOCTL_GET_CPSTAT:
++ case NILFS_IOCTL_GET_SUINFO:
++ case NILFS_IOCTL_GET_SUSTAT:
++ case NILFS_IOCTL_GET_VINFO:
++ case NILFS_IOCTL_GET_BDESCS:
++ case NILFS_IOCTL_CLEAN_SEGMENTS:
++ case NILFS_IOCTL_SYNC:
++ case NILFS_IOCTL_RESIZE:
++ case NILFS_IOCTL_SET_ALLOC_RANGE:
++ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 7fbaa91..5e30b45 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -803,9 +803,6 @@ extern void blk_unprep_request(struct request *);
+ */
+ extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
+ spinlock_t *lock, int node_id);
+-extern struct request_queue *blk_init_allocated_queue_node(struct request_queue *,
+- request_fn_proc *,
+- spinlock_t *, int node_id);
+ extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
+ extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
+ request_fn_proc *, spinlock_t *);
+diff --git a/include/linux/i2c/twl4030-madc.h b/include/linux/i2c/twl4030-madc.h
+index 6427d29..530e11b 100644
+--- a/include/linux/i2c/twl4030-madc.h
++++ b/include/linux/i2c/twl4030-madc.h
+@@ -129,6 +129,10 @@ enum sample_type {
+ #define REG_BCICTL2 0x024
+ #define TWL4030_BCI_ITHSENS 0x007
+
++/* Register and bits for GPBR1 register */
++#define TWL4030_REG_GPBR1 0x0c
++#define TWL4030_GPBR1_MADC_HFCLK_EN (1 << 7)
++
+ struct twl4030_madc_user_parms {
+ int channel;
+ int average;
+diff --git a/include/linux/lglock.h b/include/linux/lglock.h
+index f549056..87f402c 100644
+--- a/include/linux/lglock.h
++++ b/include/linux/lglock.h
+@@ -22,6 +22,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/lockdep.h>
+ #include <linux/percpu.h>
++#include <linux/cpu.h>
+
+ /* can make br locks by using local lock for read side, global lock for write */
+ #define br_lock_init(name) name##_lock_init()
+@@ -72,9 +73,31 @@
+
+ #define DEFINE_LGLOCK(name) \
+ \
++ DEFINE_SPINLOCK(name##_cpu_lock); \
++ cpumask_t name##_cpus __read_mostly; \
+ DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \
+ DEFINE_LGLOCK_LOCKDEP(name); \
+ \
++ static int \
++ name##_lg_cpu_callback(struct notifier_block *nb, \
++ unsigned long action, void *hcpu) \
++ { \
++ switch (action & ~CPU_TASKS_FROZEN) { \
++ case CPU_UP_PREPARE: \
++ spin_lock(&name##_cpu_lock); \
++ cpu_set((unsigned long)hcpu, name##_cpus); \
++ spin_unlock(&name##_cpu_lock); \
++ break; \
++ case CPU_UP_CANCELED: case CPU_DEAD: \
++ spin_lock(&name##_cpu_lock); \
++ cpu_clear((unsigned long)hcpu, name##_cpus); \
++ spin_unlock(&name##_cpu_lock); \
++ } \
++ return NOTIFY_OK; \
++ } \
++ static struct notifier_block name##_lg_cpu_notifier = { \
++ .notifier_call = name##_lg_cpu_callback, \
++ }; \
+ void name##_lock_init(void) { \
+ int i; \
+ LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
+@@ -83,6 +106,11 @@
+ lock = &per_cpu(name##_lock, i); \
+ *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \
+ } \
++ register_hotcpu_notifier(&name##_lg_cpu_notifier); \
++ get_online_cpus(); \
++ for_each_online_cpu(i) \
++ cpu_set(i, name##_cpus); \
++ put_online_cpus(); \
+ } \
+ EXPORT_SYMBOL(name##_lock_init); \
+ \
+@@ -124,9 +152,9 @@
+ \
+ void name##_global_lock_online(void) { \
+ int i; \
+- preempt_disable(); \
++ spin_lock(&name##_cpu_lock); \
+ rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
+- for_each_online_cpu(i) { \
++ for_each_cpu(i, &name##_cpus) { \
+ arch_spinlock_t *lock; \
+ lock = &per_cpu(name##_lock, i); \
+ arch_spin_lock(lock); \
+@@ -137,12 +165,12 @@
+ void name##_global_unlock_online(void) { \
+ int i; \
+ rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
+- for_each_online_cpu(i) { \
++ for_each_cpu(i, &name##_cpus) { \
+ arch_spinlock_t *lock; \
+ lock = &per_cpu(name##_lock, i); \
+ arch_spin_unlock(lock); \
+ } \
+- preempt_enable(); \
++ spin_unlock(&name##_cpu_lock); \
+ } \
+ EXPORT_SYMBOL(name##_global_unlock_online); \
+ \
+diff --git a/include/net/dst.h b/include/net/dst.h
+index 13d507d..8295249 100644
+--- a/include/net/dst.h
++++ b/include/net/dst.h
+@@ -53,6 +53,7 @@ struct dst_entry {
+ #define DST_NOHASH 0x0008
+ #define DST_NOCACHE 0x0010
+ #define DST_NOCOUNT 0x0020
++#define DST_NOPEER 0x0040
+
+ short error;
+ short obsolete;
+diff --git a/include/net/flow.h b/include/net/flow.h
+index a094477..57f15a7 100644
+--- a/include/net/flow.h
++++ b/include/net/flow.h
+@@ -207,6 +207,7 @@ extern struct flow_cache_object *flow_cache_lookup(
+ u8 dir, flow_resolve_t resolver, void *ctx);
+
+ extern void flow_cache_flush(void);
++extern void flow_cache_flush_deferred(void);
+ extern atomic_t flow_cache_genid;
+
+ #endif
+diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
+index f7d9c3f..ec86952 100644
+--- a/include/net/sctp/structs.h
++++ b/include/net/sctp/structs.h
+@@ -241,6 +241,9 @@ extern struct sctp_globals {
+ * bits is an indicator of when to send and window update SACK.
+ */
+ int rwnd_update_shift;
++
++ /* Threshold for autoclose timeout, in seconds. */
++ unsigned long max_autoclose;
+ } sctp_globals;
+
+ #define sctp_rto_initial (sctp_globals.rto_initial)
+@@ -281,6 +284,7 @@ extern struct sctp_globals {
+ #define sctp_auth_enable (sctp_globals.auth_enable)
+ #define sctp_checksum_disable (sctp_globals.checksum_disable)
+ #define sctp_rwnd_upd_shift (sctp_globals.rwnd_update_shift)
++#define sctp_max_autoclose (sctp_globals.max_autoclose)
+
+ /* SCTP Socket type: UDP or TCP style. */
+ typedef enum {
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index 1d2b6ce..b7ab0b8 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -2098,11 +2098,6 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
+ continue;
+ /* get old css_set pointer */
+ task_lock(tsk);
+- if (tsk->flags & PF_EXITING) {
+- /* ignore this task if it's going away */
+- task_unlock(tsk);
+- continue;
+- }
+ oldcg = tsk->cgroups;
+ get_css_set(oldcg);
+ task_unlock(tsk);
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 2913b35..9e316ae 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -1542,8 +1542,15 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
+ }
+
+ /* dead body doesn't have much to contribute */
+- if (p->exit_state == EXIT_DEAD)
++ if (unlikely(p->exit_state == EXIT_DEAD)) {
++ /*
++ * But do not ignore this task until the tracer does
++ * wait_task_zombie()->do_notify_parent().
++ */
++ if (likely(!ptrace) && unlikely(ptrace_reparented(p)))
++ wo->notask_error = 0;
+ return 0;
++ }
+
+ /* slay zombie? */
+ if (p->exit_state == EXIT_ZOMBIE) {
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 11cbe05..e6160fa 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -314,17 +314,29 @@ again:
+ #endif
+
+ lock_page(page_head);
++
++ /*
++ * If page_head->mapping is NULL, then it cannot be a PageAnon
++ * page; but it might be the ZERO_PAGE or in the gate area or
++ * in a special mapping (all cases which we are happy to fail);
++ * or it may have been a good file page when get_user_pages_fast
++ * found it, but truncated or holepunched or subjected to
++ * invalidate_complete_page2 before we got the page lock (also
++ * cases which we are happy to fail). And we hold a reference,
++ * so refcount care in invalidate_complete_page's remove_mapping
++ * prevents drop_caches from setting mapping to NULL beneath us.
++ *
++ * The case we do have to guard against is when memory pressure made
++ * shmem_writepage move it from filecache to swapcache beneath us:
++ * an unlikely race, but we do need to retry for page_head->mapping.
++ */
+ if (!page_head->mapping) {
++ int shmem_swizzled = PageSwapCache(page_head);
+ unlock_page(page_head);
+ put_page(page_head);
+- /*
+- * ZERO_PAGE pages don't have a mapping. Avoid a busy loop
+- * trying to find one. RW mapping would have COW'd (and thus
+- * have a mapping) so this page is RO and won't ever change.
+- */
+- if ((page_head == ZERO_PAGE(address)))
+- return -EFAULT;
+- goto again;
++ if (shmem_swizzled)
++ goto again;
++ return -EFAULT;
+ }
+
+ /*
+diff --git a/kernel/hung_task.c b/kernel/hung_task.c
+index ea64012..e972276 100644
+--- a/kernel/hung_task.c
++++ b/kernel/hung_task.c
+@@ -74,11 +74,17 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
+
+ /*
+ * Ensure the task is not frozen.
+- * Also, when a freshly created task is scheduled once, changes
+- * its state to TASK_UNINTERRUPTIBLE without having ever been
+- * switched out once, it musn't be checked.
++ * Also, skip vfork and any other user process that freezer should skip.
+ */
+- if (unlikely(t->flags & PF_FROZEN || !switch_count))
++ if (unlikely(t->flags & (PF_FROZEN | PF_FREEZER_SKIP)))
++ return;
++
++ /*
++ * When a freshly created task is scheduled once, changes its state to
++ * TASK_UNINTERRUPTIBLE without having ever been switched out once, it
++ * mustn't be checked.
++ */
++ if (unlikely(!switch_count))
+ return;
+
+ if (switch_count != t->last_switch_count) {
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index a70d2a5..67d1fdd 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -96,9 +96,20 @@ void __ptrace_unlink(struct task_struct *child)
+ */
+ if (!(child->flags & PF_EXITING) &&
+ (child->signal->flags & SIGNAL_STOP_STOPPED ||
+- child->signal->group_stop_count))
++ child->signal->group_stop_count)) {
+ child->jobctl |= JOBCTL_STOP_PENDING;
+
++ /*
++ * This is only possible if this thread was cloned by the
++ * traced task running in the stopped group, set the signal
++ * for the future reports.
++ * FIXME: we should change ptrace_init_task() to handle this
++ * case.
++ */
++ if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
++ child->jobctl |= SIGSTOP;
++ }
++
+ /*
+ * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
+ * @child in the butt. Note that @resume should be used iff @child
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 291c970..195331c 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -1986,8 +1986,6 @@ static bool do_signal_stop(int signr)
+ */
+ if (!(sig->flags & SIGNAL_STOP_STOPPED))
+ sig->group_exit_code = signr;
+- else
+- WARN_ON_ONCE(!current->ptrace);
+
+ sig->group_stop_count = 0;
+
+diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
+index e8bffbe..2ce1b30 100644
+--- a/kernel/sysctl_binary.c
++++ b/kernel/sysctl_binary.c
+@@ -1354,7 +1354,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
+
+ fput(file);
+ out_putname:
+- putname(pathname);
++ __putname(pathname);
+ out:
+ return result;
+ }
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 7771871..b91f3aa 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -1828,7 +1828,7 @@ repeat:
+ page = __page_cache_alloc(gfp | __GFP_COLD);
+ if (!page)
+ return ERR_PTR(-ENOMEM);
+- err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
++ err = add_to_page_cache_lru(page, mapping, index, gfp);
+ if (unlikely(err)) {
+ page_cache_release(page);
+ if (err == -EEXIST)
+@@ -1925,10 +1925,7 @@ static struct page *wait_on_page_read(struct page *page)
+ * @gfp: the page allocator flags to use if allocating
+ *
+ * This is the same as "read_mapping_page(mapping, index, NULL)", but with
+- * any new page allocations done using the specified allocation flags. Note
+- * that the Radix tree operations will still use GFP_KERNEL, so you can't
+- * expect to do this atomically or anything like that - but you can pass in
+- * other page requirements.
++ * any new page allocations done using the specified allocation flags.
+ *
+ * If the page does not get brought uptodate, return -EIO.
+ */
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 73f17c0..2316840 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -901,7 +901,6 @@ retry:
+ h->resv_huge_pages += delta;
+ ret = 0;
+
+- spin_unlock(&hugetlb_lock);
+ /* Free the needed pages to the hugetlb pool */
+ list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+ if ((--needed) < 0)
+@@ -915,6 +914,7 @@ retry:
+ VM_BUG_ON(page_count(page));
+ enqueue_huge_page(h, page);
+ }
++ spin_unlock(&hugetlb_lock);
+
+ /* Free unnecessary surplus pages to the buddy allocator */
+ free:
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 3508777..afde618 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -4898,9 +4898,9 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
+ int cpu;
+ enable_swap_cgroup();
+ parent = NULL;
+- root_mem_cgroup = mem;
+ if (mem_cgroup_soft_limit_tree_init())
+ goto free_out;
++ root_mem_cgroup = mem;
+ for_each_possible_cpu(cpu) {
+ struct memcg_stock_pcp *stock =
+ &per_cpu(memcg_stock, cpu);
+@@ -4939,7 +4939,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
+ return &mem->css;
+ free_out:
+ __mem_cgroup_free(mem);
+- root_mem_cgroup = NULL;
+ return ERR_PTR(error);
+ }
+
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 9c51f9f..2775fd0 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -636,6 +636,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
+ struct vm_area_struct *prev;
+ struct vm_area_struct *vma;
+ int err = 0;
++ pgoff_t pgoff;
+ unsigned long vmstart;
+ unsigned long vmend;
+
+@@ -643,13 +644,21 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
+ if (!vma || vma->vm_start > start)
+ return -EFAULT;
+
++ if (start > vma->vm_start)
++ prev = vma;
++
+ for (; vma && vma->vm_start < end; prev = vma, vma = next) {
+ next = vma->vm_next;
+ vmstart = max(start, vma->vm_start);
+ vmend = min(end, vma->vm_end);
+
++ if (mpol_equal(vma_policy(vma), new_pol))
++ continue;
++
++ pgoff = vma->vm_pgoff +
++ ((vmstart - vma->vm_start) >> PAGE_SHIFT);
+ prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
+- vma->anon_vma, vma->vm_file, vma->vm_pgoff,
++ vma->anon_vma, vma->vm_file, pgoff,
+ new_pol);
+ if (prev) {
+ vma = prev;
+diff --git a/mm/oom_kill.c b/mm/oom_kill.c
+index 626303b..e9a1785 100644
+--- a/mm/oom_kill.c
++++ b/mm/oom_kill.c
+@@ -162,7 +162,7 @@ static bool oom_unkillable_task(struct task_struct *p,
+ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
+ const nodemask_t *nodemask, unsigned long totalpages)
+ {
+- int points;
++ long points;
+
+ if (oom_unkillable_task(p, mem, nodemask))
+ return 0;
+diff --git a/mm/percpu.c b/mm/percpu.c
+index 93b5a7c..0ae7a09 100644
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -1011,9 +1011,11 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr)
+ if (!is_vmalloc_addr(addr))
+ return __pa(addr);
+ else
+- return page_to_phys(vmalloc_to_page(addr));
++ return page_to_phys(vmalloc_to_page(addr)) +
++ offset_in_page(addr);
+ } else
+- return page_to_phys(pcpu_addr_to_page(addr));
++ return page_to_phys(pcpu_addr_to_page(addr)) +
++ offset_in_page(addr);
+ }
+
+ /**
+diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
+index d6ec372..5693e5f 100644
+--- a/net/bridge/br_netfilter.c
++++ b/net/bridge/br_netfilter.c
+@@ -141,7 +141,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
+ rt->dst.dev = br->dev;
+ rt->dst.path = &rt->dst;
+ dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
+- rt->dst.flags = DST_NOXFRM;
++ rt->dst.flags = DST_NOXFRM | DST_NOPEER;
+ rt->dst.ops = &fake_dst_ops;
+ }
+
+diff --git a/net/core/flow.c b/net/core/flow.c
+index 555a456..d6968e5 100644
+--- a/net/core/flow.c
++++ b/net/core/flow.c
+@@ -358,6 +358,18 @@ void flow_cache_flush(void)
+ put_online_cpus();
+ }
+
++static void flow_cache_flush_task(struct work_struct *work)
++{
++ flow_cache_flush();
++}
++
++static DECLARE_WORK(flow_cache_flush_work, flow_cache_flush_task);
++
++void flow_cache_flush_deferred(void)
++{
++ schedule_work(&flow_cache_flush_work);
++}
++
+ static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
+ {
+ struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index bc19bd0..070f214 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -1490,7 +1490,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos)
+ {
++ int old_value = *(int *)ctl->data;
+ int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
++ int new_value = *(int *)ctl->data;
+
+ if (write) {
+ struct ipv4_devconf *cnf = ctl->extra1;
+@@ -1501,6 +1503,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
+
+ if (cnf == net->ipv4.devconf_dflt)
+ devinet_copy_dflt_conf(net, i);
++ if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1)
++ if ((new_value == 0) && (old_value != 0))
++ rt_cache_flush(net, 0);
+ }
+
+ return ret;
+diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
+index 472a8c4..004bb74 100644
+--- a/net/ipv4/ipconfig.c
++++ b/net/ipv4/ipconfig.c
+@@ -252,6 +252,10 @@ static int __init ic_open_devs(void)
+ }
+ }
+
++ /* no point in waiting if we could not bring up at least one device */
++ if (!ic_first_dev)
++ goto have_carrier;
++
+ /* wait for a carrier on at least one device */
+ start = jiffies;
+ while (jiffies - start < msecs_to_jiffies(CONF_CARRIER_TIMEOUT)) {
+diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
+index 378b20b..6f06f7f 100644
+--- a/net/ipv4/ipip.c
++++ b/net/ipv4/ipip.c
+@@ -285,6 +285,8 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
+ if (register_netdevice(dev) < 0)
+ goto failed_free;
+
++ strcpy(nt->parms.name, dev->name);
++
+ dev_hold(dev);
+ ipip_tunnel_link(ipn, nt);
+ return nt;
+@@ -759,7 +761,6 @@ static int ipip_tunnel_init(struct net_device *dev)
+ struct ip_tunnel *tunnel = netdev_priv(dev);
+
+ tunnel->dev = dev;
+- strcpy(tunnel->parms.name, dev->name);
+
+ memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
+ memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
+@@ -825,6 +826,7 @@ static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
+ static int __net_init ipip_init_net(struct net *net)
+ {
+ struct ipip_net *ipn = net_generic(net, ipip_net_id);
++ struct ip_tunnel *t;
+ int err;
+
+ ipn->tunnels[0] = ipn->tunnels_wc;
+@@ -848,6 +850,9 @@ static int __net_init ipip_init_net(struct net *net)
+ if ((err = register_netdev(ipn->fb_tunnel_dev)))
+ goto err_reg_dev;
+
++ t = netdev_priv(ipn->fb_tunnel_dev);
++
++ strcpy(t->parms.name, ipn->fb_tunnel_dev->name);
+ return 0;
+
+ err_reg_dev:
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 05ac666c..b563854 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -91,6 +91,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/times.h>
+ #include <linux/slab.h>
++#include <linux/prefetch.h>
+ #include <net/dst.h>
+ #include <net/net_namespace.h>
+ #include <net/protocol.h>
+@@ -134,6 +135,9 @@ static int ip_rt_min_advmss __read_mostly = 256;
+ static int rt_chain_length_max __read_mostly = 20;
+ static int redirect_genid;
+
++static struct delayed_work expires_work;
++static unsigned long expires_ljiffies;
++
+ /*
+ * Interface to generic destination cache.
+ */
+@@ -831,6 +835,97 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
+ return ONE;
+ }
+
++static void rt_check_expire(void)
++{
++ static unsigned int rover;
++ unsigned int i = rover, goal;
++ struct rtable *rth;
++ struct rtable __rcu **rthp;
++ unsigned long samples = 0;
++ unsigned long sum = 0, sum2 = 0;
++ unsigned long delta;
++ u64 mult;
++
++ delta = jiffies - expires_ljiffies;
++ expires_ljiffies = jiffies;
++ mult = ((u64)delta) << rt_hash_log;
++ if (ip_rt_gc_timeout > 1)
++ do_div(mult, ip_rt_gc_timeout);
++ goal = (unsigned int)mult;
++ if (goal > rt_hash_mask)
++ goal = rt_hash_mask + 1;
++ for (; goal > 0; goal--) {
++ unsigned long tmo = ip_rt_gc_timeout;
++ unsigned long length;
++
++ i = (i + 1) & rt_hash_mask;
++ rthp = &rt_hash_table[i].chain;
++
++ if (need_resched())
++ cond_resched();
++
++ samples++;
++
++ if (rcu_dereference_raw(*rthp) == NULL)
++ continue;
++ length = 0;
++ spin_lock_bh(rt_hash_lock_addr(i));
++ while ((rth = rcu_dereference_protected(*rthp,
++ lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
++ prefetch(rth->dst.rt_next);
++ if (rt_is_expired(rth)) {
++ *rthp = rth->dst.rt_next;
++ rt_free(rth);
++ continue;
++ }
++ if (rth->dst.expires) {
++ /* Entry is expired even if it is in use */
++ if (time_before_eq(jiffies, rth->dst.expires)) {
++nofree:
++ tmo >>= 1;
++ rthp = &rth->dst.rt_next;
++ /*
++ * We only count entries on
++ * a chain with equal hash inputs once
++ * so that entries for different QOS
++ * levels, and other non-hash input
++ * attributes don't unfairly skew
++ * the length computation
++ */
++ length += has_noalias(rt_hash_table[i].chain, rth);
++ continue;
++ }
++ } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
++ goto nofree;
++
++ /* Cleanup aged off entries. */
++ *rthp = rth->dst.rt_next;
++ rt_free(rth);
++ }
++ spin_unlock_bh(rt_hash_lock_addr(i));
++ sum += length;
++ sum2 += length*length;
++ }
++ if (samples) {
++ unsigned long avg = sum / samples;
++ unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
++ rt_chain_length_max = max_t(unsigned long,
++ ip_rt_gc_elasticity,
++ (avg + 4*sd) >> FRACT_BITS);
++ }
++ rover = i;
++}
++
++/*
++ * rt_worker_func() is run in process context.
++ * we call rt_check_expire() to scan part of the hash table
++ */
++static void rt_worker_func(struct work_struct *work)
++{
++ rt_check_expire();
++ schedule_delayed_work(&expires_work, ip_rt_gc_interval);
++}
++
+ /*
+ * Perturbation of rt_genid by a small quantity [1..256]
+ * Using 8 bits of shuffling ensure we can call rt_cache_invalidate()
+@@ -1272,7 +1367,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
+ {
+ struct rtable *rt = (struct rtable *) dst;
+
+- if (rt) {
++ if (rt && !(rt->dst.flags & DST_NOPEER)) {
+ if (rt->peer == NULL)
+ rt_bind_peer(rt, rt->rt_dst, 1);
+
+@@ -1283,7 +1378,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
+ iph->id = htons(inet_getid(rt->peer, more));
+ return;
+ }
+- } else
++ } else if (!rt)
+ printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
+ __builtin_return_address(0));
+
+@@ -3176,6 +3271,13 @@ static ctl_table ipv4_route_table[] = {
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ {
++ .procname = "gc_interval",
++ .data = &ip_rt_gc_interval,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec_jiffies,
++ },
++ {
+ .procname = "redirect_load",
+ .data = &ip_rt_redirect_load,
+ .maxlen = sizeof(int),
+@@ -3385,6 +3487,11 @@ int __init ip_rt_init(void)
+ devinet_init();
+ ip_fib_init();
+
++ INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
++ expires_ljiffies = jiffies;
++ schedule_delayed_work(&expires_work,
++ net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
++
+ if (ip_rt_proc_init())
+ printk(KERN_ERR "Unable to create route proc files\n");
+ #ifdef CONFIG_XFRM
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 4c882cf..55a35c1 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -606,7 +606,7 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
+ static atomic_t ipv6_fragmentation_id;
+ int old, new;
+
+- if (rt) {
++ if (rt && !(rt->dst.flags & DST_NOPEER)) {
+ struct inet_peer *peer;
+
+ if (!rt->rt6i_peer)
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 57b82dc..f02fe52 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -725,7 +725,7 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,
+ int attempts = !in_softirq();
+
+ if (!(rt->rt6i_flags&RTF_GATEWAY)) {
+- if (rt->rt6i_dst.plen != 128 &&
++ if (ort->rt6i_dst.plen != 128 &&
+ ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
+ rt->rt6i_flags |= RTF_ANYCAST;
+ ipv6_addr_copy(&rt->rt6i_gateway, daddr);
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index 00b15ac..c1e0d63 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -263,6 +263,8 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
+ if (register_netdevice(dev) < 0)
+ goto failed_free;
+
++ strcpy(nt->parms.name, dev->name);
++
+ dev_hold(dev);
+
+ ipip6_tunnel_link(sitn, nt);
+@@ -1144,7 +1146,6 @@ static int ipip6_tunnel_init(struct net_device *dev)
+ struct ip_tunnel *tunnel = netdev_priv(dev);
+
+ tunnel->dev = dev;
+- strcpy(tunnel->parms.name, dev->name);
+
+ memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
+ memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
+@@ -1207,6 +1208,7 @@ static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_hea
+ static int __net_init sit_init_net(struct net *net)
+ {
+ struct sit_net *sitn = net_generic(net, sit_net_id);
++ struct ip_tunnel *t;
+ int err;
+
+ sitn->tunnels[0] = sitn->tunnels_wc;
+@@ -1231,6 +1233,9 @@ static int __net_init sit_init_net(struct net *net)
+ if ((err = register_netdev(sitn->fb_tunnel_dev)))
+ goto err_reg_dev;
+
++ t = netdev_priv(sitn->fb_tunnel_dev);
++
++ strcpy(t->parms.name, sitn->fb_tunnel_dev->name);
+ return 0;
+
+ err_reg_dev:
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index dfd3a64..a18e6c3 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -833,15 +833,15 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
+ copied += used;
+ len -= used;
+
++ /* For non stream protcols we get one packet per recvmsg call */
++ if (sk->sk_type != SOCK_STREAM)
++ goto copy_uaddr;
++
+ if (!(flags & MSG_PEEK)) {
+ sk_eat_skb(sk, skb, 0);
+ *seq = 0;
+ }
+
+- /* For non stream protcols we get one packet per recvmsg call */
+- if (sk->sk_type != SOCK_STREAM)
+- goto copy_uaddr;
+-
+ /* Partial read */
+ if (used + offset < skb->len)
+ continue;
+@@ -857,6 +857,12 @@ copy_uaddr:
+ }
+ if (llc_sk(sk)->cmsg_flags)
+ llc_cmsg_rcv(msg, skb);
++
++ if (!(flags & MSG_PEEK)) {
++ sk_eat_skb(sk, skb, 0);
++ *seq = 0;
++ }
++
+ goto out;
+ }
+
+diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
+index db7db43..b7f4f5c 100644
+--- a/net/mac80211/agg-tx.c
++++ b/net/mac80211/agg-tx.c
+@@ -304,6 +304,38 @@ ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid)
+ __release(agg_queue);
+ }
+
++/*
++ * splice packets from the STA's pending to the local pending,
++ * requires a call to ieee80211_agg_splice_finish later
++ */
++static void __acquires(agg_queue)
++ieee80211_agg_splice_packets(struct ieee80211_local *local,
++ struct tid_ampdu_tx *tid_tx, u16 tid)
++{
++ int queue = ieee80211_ac_from_tid(tid);
++ unsigned long flags;
++
++ ieee80211_stop_queue_agg(local, tid);
++
++ if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
++ " from the pending queue\n", tid))
++ return;
++
++ if (!skb_queue_empty(&tid_tx->pending)) {
++ spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
++ /* copy over remaining packets */
++ skb_queue_splice_tail_init(&tid_tx->pending,
++ &local->pending[queue]);
++ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
++ }
++}
++
++static void __releases(agg_queue)
++ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
++{
++ ieee80211_wake_queue_agg(local, tid);
++}
++
+ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
+ {
+ struct tid_ampdu_tx *tid_tx;
+@@ -315,19 +347,17 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
+ tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
+
+ /*
+- * While we're asking the driver about the aggregation,
+- * stop the AC queue so that we don't have to worry
+- * about frames that came in while we were doing that,
+- * which would require us to put them to the AC pending
+- * afterwards which just makes the code more complex.
++ * Start queuing up packets for this aggregation session.
++ * We're going to release them once the driver is OK with
++ * that.
+ */
+- ieee80211_stop_queue_agg(local, tid);
+-
+ clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
+
+ /*
+- * make sure no packets are being processed to get
+- * valid starting sequence number
++ * Make sure no packets are being processed. This ensures that
++ * we have a valid starting sequence number and that in-flight
++ * packets have been flushed out and no packets for this TID
++ * will go into the driver during the ampdu_action call.
+ */
+ synchronize_net();
+
+@@ -341,17 +371,15 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
+ " tid %d\n", tid);
+ #endif
+ spin_lock_bh(&sta->lock);
++ ieee80211_agg_splice_packets(local, tid_tx, tid);
+ ieee80211_assign_tid_tx(sta, tid, NULL);
++ ieee80211_agg_splice_finish(local, tid);
+ spin_unlock_bh(&sta->lock);
+
+- ieee80211_wake_queue_agg(local, tid);
+ kfree_rcu(tid_tx, rcu_head);
+ return;
+ }
+
+- /* we can take packets again now */
+- ieee80211_wake_queue_agg(local, tid);
+-
+ /* activate the timer for the recipient's addBA response */
+ mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
+ #ifdef CONFIG_MAC80211_HT_DEBUG
+@@ -471,38 +499,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
+ }
+ EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
+
+-/*
+- * splice packets from the STA's pending to the local pending,
+- * requires a call to ieee80211_agg_splice_finish later
+- */
+-static void __acquires(agg_queue)
+-ieee80211_agg_splice_packets(struct ieee80211_local *local,
+- struct tid_ampdu_tx *tid_tx, u16 tid)
+-{
+- int queue = ieee80211_ac_from_tid(tid);
+- unsigned long flags;
+-
+- ieee80211_stop_queue_agg(local, tid);
+-
+- if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
+- " from the pending queue\n", tid))
+- return;
+-
+- if (!skb_queue_empty(&tid_tx->pending)) {
+- spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+- /* copy over remaining packets */
+- skb_queue_splice_tail_init(&tid_tx->pending,
+- &local->pending[queue]);
+- spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+- }
+-}
+-
+-static void __releases(agg_queue)
+-ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
+-{
+- ieee80211_wake_queue_agg(local, tid);
+-}
+-
+ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
+ struct sta_info *sta, u16 tid)
+ {
+diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
+index b9493a0..6cd8ddf 100644
+--- a/net/sched/sch_gred.c
++++ b/net/sched/sch_gred.c
+@@ -385,7 +385,7 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
+ struct gred_sched_data *q;
+
+ if (table->tab[dp] == NULL) {
+- table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL);
++ table->tab[dp] = kzalloc(sizeof(*q), GFP_ATOMIC);
+ if (table->tab[dp] == NULL)
+ return -ENOMEM;
+ }
+diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
+index ea17cbe..59b26b8 100644
+--- a/net/sched/sch_mqprio.c
++++ b/net/sched/sch_mqprio.c
+@@ -106,7 +106,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
+ if (!netif_is_multiqueue(dev))
+ return -EOPNOTSUPP;
+
+- if (nla_len(opt) < sizeof(*qopt))
++ if (!opt || nla_len(opt) < sizeof(*qopt))
+ return -EINVAL;
+
+ qopt = nla_data(opt);
+diff --git a/net/sctp/associola.c b/net/sctp/associola.c
+index dc16b90..4981482 100644
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -173,7 +173,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
+ asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
+ asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
+ asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
+- (unsigned long)sp->autoclose * HZ;
++ min_t(unsigned long, sp->autoclose, sctp_max_autoclose) * HZ;
+
+ /* Initializes the timers */
+ for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
+diff --git a/net/sctp/output.c b/net/sctp/output.c
+index 08b3cea..817174e 100644
+--- a/net/sctp/output.c
++++ b/net/sctp/output.c
+@@ -697,13 +697,7 @@ static void sctp_packet_append_data(struct sctp_packet *packet,
+ /* Keep track of how many bytes are in flight to the receiver. */
+ asoc->outqueue.outstanding_bytes += datasize;
+
+- /* Update our view of the receiver's rwnd. Include sk_buff overhead
+- * while updating peer.rwnd so that it reduces the chances of a
+- * receiver running out of receive buffer space even when receive
+- * window is still open. This can happen when a sender is sending
+- * sending small messages.
+- */
+- datasize += sizeof(struct sk_buff);
++ /* Update our view of the receiver's rwnd. */
+ if (datasize < rwnd)
+ rwnd -= datasize;
+ else
+diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
+index a6d27bf..6edd7de 100644
+--- a/net/sctp/outqueue.c
++++ b/net/sctp/outqueue.c
+@@ -411,8 +411,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
+ chunk->transport->flight_size -=
+ sctp_data_size(chunk);
+ q->outstanding_bytes -= sctp_data_size(chunk);
+- q->asoc->peer.rwnd += (sctp_data_size(chunk) +
+- sizeof(struct sk_buff));
++ q->asoc->peer.rwnd += sctp_data_size(chunk);
+ }
+ continue;
+ }
+@@ -432,8 +431,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
+ * (Section 7.2.4)), add the data size of those
+ * chunks to the rwnd.
+ */
+- q->asoc->peer.rwnd += (sctp_data_size(chunk) +
+- sizeof(struct sk_buff));
++ q->asoc->peer.rwnd += sctp_data_size(chunk);
+ q->outstanding_bytes -= sctp_data_size(chunk);
+ if (chunk->transport)
+ transport->flight_size -= sctp_data_size(chunk);
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index 91784f4..48cb7b9 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -1285,6 +1285,9 @@ SCTP_STATIC __init int sctp_init(void)
+ sctp_max_instreams = SCTP_DEFAULT_INSTREAMS;
+ sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS;
+
++ /* Initialize maximum autoclose timeout. */
++ sctp_max_autoclose = INT_MAX / HZ;
++
+ /* Initialize handle used for association ids. */
+ idr_init(&sctp_assocs_id);
+
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 836aa63..4760f4e 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -2199,8 +2199,6 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
+ return -EINVAL;
+ if (copy_from_user(&sp->autoclose, optval, optlen))
+ return -EFAULT;
+- /* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */
+- sp->autoclose = min_t(long, sp->autoclose, MAX_SCHEDULE_TIMEOUT / HZ);
+
+ return 0;
+ }
+diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
+index 6b39529..60ffbd0 100644
+--- a/net/sctp/sysctl.c
++++ b/net/sctp/sysctl.c
+@@ -53,6 +53,10 @@ static int sack_timer_min = 1;
+ static int sack_timer_max = 500;
+ static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
+ static int rwnd_scale_max = 16;
++static unsigned long max_autoclose_min = 0;
++static unsigned long max_autoclose_max =
++ (MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX)
++ ? UINT_MAX : MAX_SCHEDULE_TIMEOUT / HZ;
+
+ extern long sysctl_sctp_mem[3];
+ extern int sysctl_sctp_rmem[3];
+@@ -258,6 +262,15 @@ static ctl_table sctp_table[] = {
+ .extra1 = &one,
+ .extra2 = &rwnd_scale_max,
+ },
++ {
++ .procname = "max_autoclose",
++ .data = &sctp_max_autoclose,
++ .maxlen = sizeof(unsigned long),
++ .mode = 0644,
++ .proc_handler = &proc_doulongvec_minmax,
++ .extra1 = &max_autoclose_min,
++ .extra2 = &max_autoclose_max,
++ },
+
+ { /* sentinel */ }
+ };
+diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
+index f4385e4..c64c0ef 100644
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -995,13 +995,11 @@ out_init_req:
+
+ static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
+ {
+- if (xprt_dynamic_free_slot(xprt, req))
+- return;
+-
+- memset(req, 0, sizeof(*req)); /* mark unused */
+-
+ spin_lock(&xprt->reserve_lock);
+- list_add(&req->rq_list, &xprt->free);
++ if (!xprt_dynamic_free_slot(xprt, req)) {
++ memset(req, 0, sizeof(*req)); /* mark unused */
++ list_add(&req->rq_list, &xprt->free);
++ }
+ rpc_wake_up_next(&xprt->backlog);
+ spin_unlock(&xprt->reserve_lock);
+ }
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index 552df27..7e088c0 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -2276,8 +2276,6 @@ static void __xfrm_garbage_collect(struct net *net)
+ {
+ struct dst_entry *head, *next;
+
+- flow_cache_flush();
+-
+ spin_lock_bh(&xfrm_policy_sk_bundle_lock);
+ head = xfrm_policy_sk_bundles;
+ xfrm_policy_sk_bundles = NULL;
+@@ -2290,6 +2288,18 @@ static void __xfrm_garbage_collect(struct net *net)
+ }
+ }
+
++static void xfrm_garbage_collect(struct net *net)
++{
++ flow_cache_flush();
++ __xfrm_garbage_collect(net);
++}
++
++static void xfrm_garbage_collect_deferred(struct net *net)
++{
++ flow_cache_flush_deferred();
++ __xfrm_garbage_collect(net);
++}
++
+ static void xfrm_init_pmtu(struct dst_entry *dst)
+ {
+ do {
+@@ -2420,7 +2430,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
+ if (likely(dst_ops->neigh_lookup == NULL))
+ dst_ops->neigh_lookup = xfrm_neigh_lookup;
+ if (likely(afinfo->garbage_collect == NULL))
+- afinfo->garbage_collect = __xfrm_garbage_collect;
++ afinfo->garbage_collect = xfrm_garbage_collect_deferred;
+ xfrm_policy_afinfo[afinfo->family] = afinfo;
+ }
+ write_unlock_bh(&xfrm_policy_afinfo_lock);
+@@ -2514,7 +2524,7 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void
+
+ switch (event) {
+ case NETDEV_DOWN:
+- __xfrm_garbage_collect(dev_net(dev));
++ xfrm_garbage_collect(dev_net(dev));
+ }
+ return NOTIFY_DONE;
+ }
+diff --git a/security/selinux/netport.c b/security/selinux/netport.c
+index 0b62bd1..7b9eb1f 100644
+--- a/security/selinux/netport.c
++++ b/security/selinux/netport.c
+@@ -123,7 +123,9 @@ static void sel_netport_insert(struct sel_netport *port)
+ if (sel_netport_hash[idx].size == SEL_NETPORT_HASH_BKT_LIMIT) {
+ struct sel_netport *tail;
+ tail = list_entry(
+- rcu_dereference(sel_netport_hash[idx].list.prev),
++ rcu_dereference_protected(
++ sel_netport_hash[idx].list.prev,
++ lockdep_is_held(&sel_netport_lock)),
+ struct sel_netport, list);
+ list_del_rcu(&tail->list);
+ kfree_rcu(tail, rcu);
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index f665975..82b7c88 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2375,6 +2375,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
+ SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
++ SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS 1101HA", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB),
+diff --git a/sound/soc/codecs/wm8996.c b/sound/soc/codecs/wm8996.c
+index c9c4e5c..5c40874 100644
+--- a/sound/soc/codecs/wm8996.c
++++ b/sound/soc/codecs/wm8996.c
+@@ -1895,6 +1895,7 @@ static int wm8996_set_sysclk(struct snd_soc_dai *dai,
+ break;
+ case 24576000:
+ ratediv = WM8996_SYSCLK_DIV;
++ wm8996->sysclk /= 2;
+ case 12288000:
+ snd_soc_update_bits(codec, WM8996_AIF_RATE,
+ WM8996_SYSCLK_RATE, WM8996_SYSCLK_RATE);
diff --git a/3.1.7/4420_grsecurity-2.2.2-3.1.7-201201032037.patch b/3.1.8/4420_grsecurity-2.2.2-3.1.8-201201111906.patch
index ed41cd3..990a964 100644
--- a/3.1.7/4420_grsecurity-2.2.2-3.1.7-201201032037.patch
+++ b/3.1.8/4420_grsecurity-2.2.2-3.1.8-201201111906.patch
@@ -186,7 +186,7 @@ index d6e6724..a024ce8 100644
pcd. [PARIDE]
diff --git a/Makefile b/Makefile
-index 96c48df..f811964 100644
+index 64a2e76..5b86280 100644
--- a/Makefile
+++ b/Makefile
@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -212,7 +212,7 @@ index 96c48df..f811964 100644
$(Q)$(MAKE) $(build)=scripts/basic
$(Q)rm -f .tmp_quiet_recordmcount
-@@ -564,6 +565,42 @@ else
+@@ -564,6 +565,46 @@ else
KBUILD_CFLAGS += -O2
endif
@@ -239,9 +239,13 @@ index 96c48df..f811964 100644
+endif
+GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
++ifeq ($(KBUILD_EXTMOD),)
+gcc-plugins:
+ $(Q)$(MAKE) $(build)=tools/gcc
+else
++gcc-plugins: ;
++endif
++else
+gcc-plugins:
+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
@@ -255,7 +259,7 @@ index 96c48df..f811964 100644
include $(srctree)/arch/$(SRCARCH)/Makefile
ifneq ($(CONFIG_FRAME_WARN),0)
-@@ -708,7 +745,7 @@ export mod_strip_cmd
+@@ -708,7 +749,7 @@ export mod_strip_cmd
ifeq ($(KBUILD_EXTMOD),)
@@ -264,7 +268,7 @@ index 96c48df..f811964 100644
vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
$(core-y) $(core-m) $(drivers-y) $(drivers-m) \
-@@ -932,6 +969,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
+@@ -932,6 +973,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
# The actual objects are generated when descending,
# make sure no implicit rule kicks in
@@ -272,7 +276,7 @@ index 96c48df..f811964 100644
$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
# Handle descending into subdirectories listed in $(vmlinux-dirs)
-@@ -941,7 +979,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
+@@ -941,7 +983,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
# Error messages still appears in the original language
PHONY += $(vmlinux-dirs)
@@ -281,7 +285,7 @@ index 96c48df..f811964 100644
$(Q)$(MAKE) $(build)=$@
# Store (new) KERNELRELASE string in include/config/kernel.release
-@@ -986,6 +1024,7 @@ prepare0: archprepare FORCE
+@@ -986,6 +1028,7 @@ prepare0: archprepare FORCE
$(Q)$(MAKE) $(build)=. missing-syscalls
# All the preparing..
@@ -289,7 +293,7 @@ index 96c48df..f811964 100644
prepare: prepare0
# Generate some files
-@@ -1087,6 +1126,7 @@ all: modules
+@@ -1087,6 +1130,7 @@ all: modules
# using awk while concatenating to the final file.
PHONY += modules
@@ -297,7 +301,7 @@ index 96c48df..f811964 100644
modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
$(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
@$(kecho) ' Building modules, stage 2.';
-@@ -1102,7 +1142,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
+@@ -1102,7 +1146,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
# Target to prepare building external modules
PHONY += modules_prepare
@@ -306,7 +310,7 @@ index 96c48df..f811964 100644
# Target to install modules
PHONY += modules_install
-@@ -1198,7 +1238,7 @@ distclean: mrproper
+@@ -1198,7 +1242,7 @@ distclean: mrproper
@find $(srctree) $(RCS_FIND_IGNORE) \
\( -name '*.orig' -o -name '*.rej' -o -name '*~' \
-o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
@@ -315,7 +319,7 @@ index 96c48df..f811964 100644
-o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
-type f -print | xargs rm -f
-@@ -1360,6 +1400,7 @@ PHONY += $(module-dirs) modules
+@@ -1360,6 +1404,7 @@ PHONY += $(module-dirs) modules
$(module-dirs): crmodverdir $(objtree)/Module.symvers
$(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
@@ -323,7 +327,7 @@ index 96c48df..f811964 100644
modules: $(module-dirs)
@$(kecho) ' Building modules, stage 2.';
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
-@@ -1486,17 +1527,19 @@ else
+@@ -1486,17 +1531,19 @@ else
target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
endif
@@ -347,7 +351,7 @@ index 96c48df..f811964 100644
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
%.symtypes: %.c prepare scripts FORCE
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-@@ -1506,11 +1549,13 @@ endif
+@@ -1506,11 +1553,13 @@ endif
$(cmd_crmodverdir)
$(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
$(build)=$(build-dir)
@@ -3103,19 +3107,6 @@ index 541a750..8739853 100644
- return base;
- return ret;
-}
-diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
-index 7b371c3..ad06cf1 100644
---- a/arch/s390/kernel/setup.c
-+++ b/arch/s390/kernel/setup.c
-@@ -271,7 +271,7 @@ static int __init early_parse_mem(char *p)
- }
- early_param("mem", early_parse_mem);
-
--unsigned int user_mode = HOME_SPACE_MODE;
-+unsigned int user_mode = SECONDARY_SPACE_MODE;
- EXPORT_SYMBOL_GPL(user_mode);
-
- static int set_amode_and_uaccess(unsigned long user_amode,
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index c9a9f7f..60d0315 100644
--- a/arch/s390/mm/mmap.c
@@ -3523,7 +3514,7 @@ index 7df8b7f..4946269 100644
#define ELF_HWCAP sparc64_elf_hwcap
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
-index 5b31a8e..1d92567 100644
+index a790cc6..091ed94 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
@@ -4547,7 +4538,7 @@ index 1b30bb3..b4a16c7 100644
/* Atomic bit operations. */
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
-index e3cda21..a68e4cb 100644
+index 301421c..e2535d1 100644
--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -2,7 +2,7 @@
@@ -6629,7 +6620,7 @@ index 6557769..ef6ae89 100644
if (err)
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
-index 54edb207..9335b5f 100644
+index 54edb207..f5101b9 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -13,7 +13,9 @@
@@ -6642,7 +6633,7 @@ index 54edb207..9335b5f 100644
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
-@@ -95,6 +97,30 @@ ENTRY(native_irq_enable_sysexit)
+@@ -95,6 +97,32 @@ ENTRY(native_irq_enable_sysexit)
ENDPROC(native_irq_enable_sysexit)
#endif
@@ -6659,7 +6650,9 @@ index 54edb207..9335b5f 100644
+#endif
+#ifdef CONFIG_PAX_RANDKSTACK
+ pushq %rax
++ pushq %r11
+ call pax_randomize_kstack
++ popq %r11
+ popq %rax
+#endif
+ .endm
@@ -6673,7 +6666,7 @@ index 54edb207..9335b5f 100644
/*
* 32bit SYSENTER instruction entry.
*
-@@ -121,12 +147,6 @@ ENTRY(ia32_sysenter_target)
+@@ -121,12 +149,6 @@ ENTRY(ia32_sysenter_target)
CFI_REGISTER rsp,rbp
SWAPGS_UNSAFE_STACK
movq PER_CPU_VAR(kernel_stack), %rsp
@@ -6686,7 +6679,7 @@ index 54edb207..9335b5f 100644
movl %ebp,%ebp /* zero extension */
pushq_cfi $__USER32_DS
/*CFI_REL_OFFSET ss,0*/
-@@ -134,25 +154,38 @@ ENTRY(ia32_sysenter_target)
+@@ -134,25 +156,38 @@ ENTRY(ia32_sysenter_target)
CFI_REL_OFFSET rsp,0
pushfq_cfi
/*CFI_REL_OFFSET rflags,0*/
@@ -6731,7 +6724,7 @@ index 54edb207..9335b5f 100644
CFI_REMEMBER_STATE
jnz sysenter_tracesys
cmpq $(IA32_NR_syscalls-1),%rax
-@@ -162,13 +195,15 @@ sysenter_do_call:
+@@ -162,13 +197,15 @@ sysenter_do_call:
sysenter_dispatch:
call *ia32_sys_call_table(,%rax,8)
movq %rax,RAX-ARGOFFSET(%rsp)
@@ -6750,7 +6743,7 @@ index 54edb207..9335b5f 100644
/* clear IF, that popfq doesn't enable interrupts early */
andl $~0x200,EFLAGS-R11(%rsp)
movl RIP-R11(%rsp),%edx /* User %eip */
-@@ -194,6 +229,9 @@ sysexit_from_sys_call:
+@@ -194,6 +231,9 @@ sysexit_from_sys_call:
movl %eax,%esi /* 2nd arg: syscall number */
movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
call audit_syscall_entry
@@ -6760,7 +6753,7 @@ index 54edb207..9335b5f 100644
movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
cmpq $(IA32_NR_syscalls-1),%rax
ja ia32_badsys
-@@ -205,7 +243,7 @@ sysexit_from_sys_call:
+@@ -205,7 +245,7 @@ sysexit_from_sys_call:
.endm
.macro auditsys_exit exit
@@ -6769,7 +6762,7 @@ index 54edb207..9335b5f 100644
jnz ia32_ret_from_sys_call
TRACE_IRQS_ON
sti
-@@ -215,12 +253,12 @@ sysexit_from_sys_call:
+@@ -215,12 +255,12 @@ sysexit_from_sys_call:
movzbl %al,%edi /* zero-extend that into %edi */
inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
call audit_syscall_exit
@@ -6784,7 +6777,7 @@ index 54edb207..9335b5f 100644
jz \exit
CLEAR_RREGS -ARGOFFSET
jmp int_with_check
-@@ -238,7 +276,7 @@ sysexit_audit:
+@@ -238,7 +278,7 @@ sysexit_audit:
sysenter_tracesys:
#ifdef CONFIG_AUDITSYSCALL
@@ -6793,7 +6786,7 @@ index 54edb207..9335b5f 100644
jz sysenter_auditsys
#endif
SAVE_REST
-@@ -246,6 +284,9 @@ sysenter_tracesys:
+@@ -246,6 +286,9 @@ sysenter_tracesys:
movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
movq %rsp,%rdi /* &pt_regs -> arg1 */
call syscall_trace_enter
@@ -6803,7 +6796,7 @@ index 54edb207..9335b5f 100644
LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
RESTORE_REST
cmpq $(IA32_NR_syscalls-1),%rax
-@@ -277,19 +318,20 @@ ENDPROC(ia32_sysenter_target)
+@@ -277,19 +320,20 @@ ENDPROC(ia32_sysenter_target)
ENTRY(ia32_cstar_target)
CFI_STARTPROC32 simple
CFI_SIGNAL_FRAME
@@ -6826,7 +6819,7 @@ index 54edb207..9335b5f 100644
movl %eax,%eax /* zero extension */
movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
movq %rcx,RIP-ARGOFFSET(%rsp)
-@@ -305,13 +347,19 @@ ENTRY(ia32_cstar_target)
+@@ -305,13 +349,19 @@ ENTRY(ia32_cstar_target)
/* no need to do an access_ok check here because r8 has been
32bit zero extended */
/* hardware stack frame is complete now */
@@ -6849,7 +6842,7 @@ index 54edb207..9335b5f 100644
CFI_REMEMBER_STATE
jnz cstar_tracesys
cmpq $IA32_NR_syscalls-1,%rax
-@@ -321,13 +369,15 @@ cstar_do_call:
+@@ -321,13 +371,15 @@ cstar_do_call:
cstar_dispatch:
call *ia32_sys_call_table(,%rax,8)
movq %rax,RAX-ARGOFFSET(%rsp)
@@ -6868,7 +6861,7 @@ index 54edb207..9335b5f 100644
RESTORE_ARGS 0,-ARG_SKIP,0,0,0
movl RIP-ARGOFFSET(%rsp),%ecx
CFI_REGISTER rip,rcx
-@@ -355,7 +405,7 @@ sysretl_audit:
+@@ -355,7 +407,7 @@ sysretl_audit:
cstar_tracesys:
#ifdef CONFIG_AUDITSYSCALL
@@ -6877,7 +6870,7 @@ index 54edb207..9335b5f 100644
jz cstar_auditsys
#endif
xchgl %r9d,%ebp
-@@ -364,6 +414,9 @@ cstar_tracesys:
+@@ -364,6 +416,9 @@ cstar_tracesys:
movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
movq %rsp,%rdi /* &pt_regs -> arg1 */
call syscall_trace_enter
@@ -6887,7 +6880,7 @@ index 54edb207..9335b5f 100644
LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
RESTORE_REST
xchgl %ebp,%r9d
-@@ -409,20 +462,21 @@ ENTRY(ia32_syscall)
+@@ -409,20 +464,21 @@ ENTRY(ia32_syscall)
CFI_REL_OFFSET rip,RIP-RIP
PARAVIRT_ADJUST_EXCEPTION_FRAME
SWAPGS
@@ -6917,7 +6910,7 @@ index 54edb207..9335b5f 100644
jnz ia32_tracesys
cmpq $(IA32_NR_syscalls-1),%rax
ja ia32_badsys
-@@ -441,6 +495,9 @@ ia32_tracesys:
+@@ -441,6 +497,9 @@ ia32_tracesys:
movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
movq %rsp,%rdi /* &pt_regs -> arg1 */
call syscall_trace_enter
@@ -6927,7 +6920,7 @@ index 54edb207..9335b5f 100644
LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
RESTORE_REST
cmpq $(IA32_NR_syscalls-1),%rax
-@@ -455,6 +512,7 @@ ia32_badsys:
+@@ -455,6 +514,7 @@ ia32_badsys:
quiet_ni_syscall:
movq $-ENOSYS,%rax
@@ -7007,9 +7000,22 @@ index f6f5c53..b358b28 100644
set_fs(old_fs);
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
-index 091508b..0ee32ec 100644
+index 091508b..e245ff2 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
+@@ -4,10 +4,10 @@
+
+ #ifdef CONFIG_SMP
+ .macro LOCK_PREFIX
+-1: lock
++672: lock
+ .section .smp_locks,"a"
+ .balign 4
+- .long 1b - .
++ .long 672b - .
+ .previous
+ .endm
+ #else
@@ -15,6 +15,45 @@
.endm
#endif
@@ -7105,7 +7111,7 @@ index 20370c6..a2eb9b0 100644
"popl %%ebp\n\t"
"popl %%edi\n\t"
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
-index 10572e3..2618d91 100644
+index 10572e3..392d0bc 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -22,7 +22,18 @@
@@ -7358,10 +7364,11 @@ index 10572e3..2618d91 100644
: "+m" (v->counter), "=qm" (c)
: "ir" (i) : "memory");
return c;
-@@ -180,6 +342,46 @@ static inline int atomic_add_return(int i, atomic_t *v)
+@@ -180,16 +342,56 @@ static inline int atomic_add_return(int i, atomic_t *v)
#endif
/* Modern 486+ processor */
__i = i;
+- asm volatile(LOCK_PREFIX "xaddl %0, %1"
+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
@@ -7371,15 +7378,16 @@ index 10572e3..2618d91 100644
+ _ASM_EXTABLE(0b, 0b)
+#endif
+
-+ : "+r" (i), "+m" (v->counter)
-+ : : "memory");
-+ return i + __i;
-+
-+#ifdef CONFIG_M386
-+no_xadd: /* Legacy 386 processor */
+ : "+r" (i), "+m" (v->counter)
+ : : "memory");
+ return i + __i;
+
+ #ifdef CONFIG_M386
+ no_xadd: /* Legacy 386 processor */
+- raw_local_irq_save(flags);
+ local_irq_save(flags);
-+ __i = atomic_read(v);
-+ atomic_set(v, i + __i);
+ __i = atomic_read(v);
+ atomic_set(v, i + __i);
+ local_irq_restore(flags);
+ return i + __i;
+#endif
@@ -7402,9 +7410,19 @@ index 10572e3..2618d91 100644
+#endif
+ /* Modern 486+ processor */
+ __i = i;
- asm volatile(LOCK_PREFIX "xaddl %0, %1"
- : "+r" (i), "+m" (v->counter)
- : : "memory");
++ asm volatile(LOCK_PREFIX "xaddl %0, %1"
++ : "+r" (i), "+m" (v->counter)
++ : : "memory");
++ return i + __i;
++
++#ifdef CONFIG_M386
++no_xadd: /* Legacy 386 processor */
++ raw_local_irq_save(flags);
++ __i = atomic_read_unchecked(v);
++ atomic_set_unchecked(v, i + __i);
+ raw_local_irq_restore(flags);
+ return i + __i;
+ #endif
@@ -208,6 +410,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
}
@@ -11858,7 +11876,7 @@ index b13ed39..603286c 100644
if (c->x86_model == 3 && c->x86_mask == 0)
size = 64;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index 6218439..0f1addc 100644
+index 6218439..ab2e4ab 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
@@ -11935,7 +11953,7 @@ index 6218439..0f1addc 100644
/* Filter out anything that depends on CPUID levels we don't have */
filter_cpuid_features(c, true);
-+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
+ setup_clear_cpu_cap(X86_FEATURE_SEP);
+#endif
+
@@ -13421,7 +13439,7 @@ index f3f6f53..0841b66 100644
/*
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
-index 6419bb0..00440bf 100644
+index 6419bb0..bb59ca4 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -55,6 +55,8 @@
@@ -13605,9 +13623,9 @@ index 6419bb0..00440bf 100644
+ call pax_exit_kernel_user
+#endif
+#ifdef CONFIG_PAX_RANDKSTACK
-+ push %rax
++ pushq %rax
+ call pax_randomize_kstack
-+ pop %rax
++ popq %rax
+#endif
+ .endm
+
@@ -16137,7 +16155,7 @@ index 35ccf75..67e7d4d 100644
for (p = start; p < finish; p++) {
q = find_dependents_of(start, finish, p);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
-index 30eb651..0758167 100644
+index 30eb651..37fa2d7 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
@@ -16220,17 +16238,16 @@ index 30eb651..0758167 100644
#else
regs.ss = __KERNEL_DS;
#endif
-@@ -411,7 +431,8 @@ bool set_pm_idle_to_default(void)
+@@ -411,7 +431,7 @@ bool set_pm_idle_to_default(void)
return ret;
}
-void stop_this_cpu(void *dummy)
-+
+__noreturn void stop_this_cpu(void *dummy)
{
local_irq_disable();
/*
-@@ -653,16 +674,37 @@ static int __init idle_setup(char *str)
+@@ -653,16 +673,37 @@ static int __init idle_setup(char *str)
}
early_param("idle", idle_setup);
@@ -21388,7 +21405,7 @@ index d0474ad..36e9257 100644
extern u32 pnp_bios_is_utter_crap;
pnp_bios_is_utter_crap = 1;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
-index 0d17c8c..4f4764f 100644
+index 0d17c8c..c5d9925 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -13,11 +13,18 @@
@@ -21826,7 +21843,7 @@ index 0d17c8c..4f4764f 100644
if (unlikely(expand_stack(vma, address))) {
bad_area(regs, error_code, address);
return;
-@@ -1193,3 +1410,240 @@ good_area:
+@@ -1193,3 +1410,292 @@ good_area:
up_read(&mm->mmap_sem);
}
@@ -21867,6 +21884,30 @@ index 0d17c8c..4f4764f 100644
+{
+ int err;
+
++ do { /* PaX: libffi trampoline emulation */
++ unsigned char mov, jmp;
++ unsigned int addr1, addr2;
++
++#ifdef CONFIG_X86_64
++ if ((regs->ip + 9) >> 32)
++ break;
++#endif
++
++ err = get_user(mov, (unsigned char __user *)regs->ip);
++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
++ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
++ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
++
++ if (err)
++ break;
++
++ if (mov == 0xB8 && jmp == 0xE9) {
++ regs->ax = addr1;
++ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
++ return 2;
++ }
++ } while (0);
++
+ do { /* PaX: gcc trampoline emulation #1 */
+ unsigned char mov1, mov2;
+ unsigned short jmp;
@@ -21926,6 +21967,34 @@ index 0d17c8c..4f4764f 100644
+{
+ int err;
+
++ do { /* PaX: libffi trampoline emulation */
++ unsigned short mov1, mov2, jmp1;
++ unsigned char stcclc, jmp2;
++ unsigned long addr1, addr2;
++
++ err = get_user(mov1, (unsigned short __user *)regs->ip);
++ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
++ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
++ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
++ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
++ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
++ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
++
++ if (err)
++ break;
++
++ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
++ regs->r11 = addr1;
++ regs->r10 = addr2;
++ if (stcclc == 0xF8)
++ regs->flags &= ~X86_EFLAGS_CF;
++ else
++ regs->flags |= X86_EFLAGS_CF;
++ regs->ip = addr1;
++ return 2;
++ }
++ } while (0);
++
+ do { /* PaX: gcc trampoline emulation #1 */
+ unsigned short mov1, mov2, jmp1;
+ unsigned char jmp2;
@@ -23628,7 +23697,7 @@ index 6687022..ceabcfa 100644
+ pax_force_retaddr
ret
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
-index bfab3fa..05aac3a 100644
+index 7b65f75..63097f6 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void *start, void *end)
@@ -28195,7 +28264,7 @@ index 3c395a5..02889c2 100644
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
-index 8a3942c..1b73bf1 100644
+index c72b590..aa86f0a 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1171,7 +1171,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
@@ -28208,7 +28277,7 @@ index 8a3942c..1b73bf1 100644
return can_switch;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
-index 7916bd9..7c17a0f 100644
+index 1a2a2d1..f280182 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -222,7 +222,7 @@ struct drm_i915_display_funcs {
@@ -28229,7 +28298,7 @@ index 7916bd9..7c17a0f 100644
/* protects the irq masks */
spinlock_t irq_lock;
-@@ -882,7 +882,7 @@ struct drm_i915_gem_object {
+@@ -883,7 +883,7 @@ struct drm_i915_gem_object {
* will be page flipped away on the next vblank. When it
* reaches 0, dev_priv->pending_flip_queue will be woken up.
*/
@@ -28238,7 +28307,7 @@ index 7916bd9..7c17a0f 100644
};
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
-@@ -1262,7 +1262,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
+@@ -1263,7 +1263,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
@@ -28695,7 +28764,7 @@ index 184628c..30e1725 100644
/*
* Asic structures
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
-index 285acc4..f4d909f 100644
+index a098edc..d001c09 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -569,6 +569,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
@@ -35093,10 +35162,10 @@ index 38b6fc0..b5cbfce 100644
extern struct oprofile_stat_struct oprofile_stats;
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
-index e9ff6f7..28e259a 100644
+index 1c0b799..c11b2d2 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
-@@ -186,7 +186,7 @@ static const struct file_operations atomic_ro_fops = {
+@@ -193,7 +193,7 @@ static const struct file_operations atomic_ro_fops = {
int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
@@ -41916,10 +41985,18 @@ index a6395bd..a5b24c4 100644
fd_offset + ex.a_text);
up_write(&current->mm->mmap_sem);
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
-index 21ac5ee..171b1d0 100644
+index 21ac5ee..f54fdd0 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
-@@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump_params *cprm);
+@@ -32,6 +32,7 @@
+ #include <linux/elf.h>
+ #include <linux/utsname.h>
+ #include <linux/coredump.h>
++#include <linux/xattr.h>
+ #include <asm/uaccess.h>
+ #include <asm/param.h>
+ #include <asm/page.h>
+@@ -51,6 +52,10 @@ static int elf_core_dump(struct coredump_params *cprm);
#define elf_core_dump NULL
#endif
@@ -41930,7 +42007,7 @@ index 21ac5ee..171b1d0 100644
#if ELF_EXEC_PAGESIZE > PAGE_SIZE
#define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
#else
-@@ -70,6 +74,11 @@ static struct linux_binfmt elf_format = {
+@@ -70,6 +75,11 @@ static struct linux_binfmt elf_format = {
.load_binary = load_elf_binary,
.load_shlib = load_elf_library,
.core_dump = elf_core_dump,
@@ -41942,7 +42019,7 @@ index 21ac5ee..171b1d0 100644
.min_coredump = ELF_EXEC_PAGESIZE,
};
-@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format = {
+@@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
static int set_brk(unsigned long start, unsigned long end)
{
@@ -41951,7 +42028,7 @@ index 21ac5ee..171b1d0 100644
start = ELF_PAGEALIGN(start);
end = ELF_PAGEALIGN(end);
if (end > start) {
-@@ -87,7 +98,7 @@ static int set_brk(unsigned long start, unsigned long end)
+@@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
if (BAD_ADDR(addr))
return addr;
}
@@ -41960,7 +42037,7 @@ index 21ac5ee..171b1d0 100644
return 0;
}
-@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
+@@ -148,12 +160,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
elf_addr_t __user *u_rand_bytes;
const char *k_platform = ELF_PLATFORM;
const char *k_base_platform = ELF_BASE_PLATFORM;
@@ -41977,7 +42054,7 @@ index 21ac5ee..171b1d0 100644
/*
* In some cases (e.g. Hyper-Threading), we want to avoid L1
-@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
+@@ -195,8 +210,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
* Generate 16 random bytes for userspace PRNG seeding.
*/
get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
@@ -41992,7 +42069,7 @@ index 21ac5ee..171b1d0 100644
if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
return -EFAULT;
-@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
+@@ -308,9 +327,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
return -EFAULT;
current->mm->env_end = p;
@@ -42005,7 +42082,7 @@ index 21ac5ee..171b1d0 100644
return -EFAULT;
return 0;
}
-@@ -381,10 +401,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+@@ -381,10 +402,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
{
struct elf_phdr *elf_phdata;
struct elf_phdr *eppnt;
@@ -42018,7 +42095,7 @@ index 21ac5ee..171b1d0 100644
unsigned long total_size;
int retval, i, size;
-@@ -430,6 +450,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+@@ -430,6 +451,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
goto out_close;
}
@@ -42030,7 +42107,7 @@ index 21ac5ee..171b1d0 100644
eppnt = elf_phdata;
for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
if (eppnt->p_type == PT_LOAD) {
-@@ -473,8 +498,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+@@ -473,8 +499,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
k = load_addr + eppnt->p_vaddr;
if (BAD_ADDR(k) ||
eppnt->p_filesz > eppnt->p_memsz ||
@@ -42041,15 +42118,16 @@ index 21ac5ee..171b1d0 100644
error = -ENOMEM;
goto out_close;
}
-@@ -528,6 +553,193 @@ out:
+@@ -528,6 +554,348 @@ out:
return error;
}
-+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
-+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
++static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
+{
+ unsigned long pax_flags = 0UL;
+
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++
+#ifdef CONFIG_PAX_PAGEEXEC
+ if (elf_phdata->p_flags & PF_PAGEEXEC)
+ pax_flags |= MF_PAX_PAGEEXEC;
@@ -42084,15 +42162,17 @@ index 21ac5ee..171b1d0 100644
+ pax_flags |= MF_PAX_RANDMMAP;
+#endif
+
++#endif
++
+ return pax_flags;
+}
-+#endif
+
-+#ifdef CONFIG_PAX_PT_PAX_FLAGS
-+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
++static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
+{
+ unsigned long pax_flags = 0UL;
+
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++
+#ifdef CONFIG_PAX_PAGEEXEC
+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
+ pax_flags |= MF_PAX_PAGEEXEC;
@@ -42127,15 +42207,17 @@ index 21ac5ee..171b1d0 100644
+ pax_flags |= MF_PAX_RANDMMAP;
+#endif
+
++#endif
++
+ return pax_flags;
+}
-+#endif
+
-+#ifdef CONFIG_PAX_EI_PAX
+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
+{
+ unsigned long pax_flags = 0UL;
+
++#ifdef CONFIG_PAX_EI_PAX
++
+#ifdef CONFIG_PAX_PAGEEXEC
+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
+ pax_flags |= MF_PAX_PAGEEXEC;
@@ -42170,25 +42252,38 @@ index 21ac5ee..171b1d0 100644
+ pax_flags |= MF_PAX_RANDMMAP;
+#endif
+
-+ return pax_flags;
-+}
++#else
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ pax_flags |= MF_PAX_PAGEEXEC;
+#endif
+
-+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
-+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
-+{
-+ unsigned long pax_flags = 0UL;
++#ifdef CONFIG_PAX_MPROTECT
++ pax_flags |= MF_PAX_MPROTECT;
++#endif
+
-+#ifdef CONFIG_PAX_PT_PAX_FLAGS
-+ unsigned long i;
-+ int found_flags = 0;
++#ifdef CONFIG_PAX_RANDMMAP
++ pax_flags |= MF_PAX_RANDMMAP;
+#endif
+
-+#ifdef CONFIG_PAX_EI_PAX
-+ pax_flags = pax_parse_ei_pax(elf_ex);
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!(__supported_pte_mask & _PAGE_NX)) {
++ pax_flags &= ~MF_PAX_PAGEEXEC;
++ pax_flags |= MF_PAX_SEGMEXEC;
++ }
+#endif
+
++#endif
++
++ return pax_flags;
++}
++
++static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
++{
++
+#ifdef CONFIG_PAX_PT_PAX_FLAGS
++ unsigned long i;
++
+ for (i = 0UL; i < elf_ex->e_phnum; i++)
+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
@@ -42196,34 +42291,171 @@ index 21ac5ee..171b1d0 100644
+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
-+ return -EINVAL;
++ return ~0UL;
+
+#ifdef CONFIG_PAX_SOFTMODE
+ if (pax_softmode)
-+ pax_flags = pax_parse_softmode(&elf_phdata[i]);
++ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
+ else
+#endif
+
-+ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
-+ found_flags = 1;
++ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
+ break;
+ }
+#endif
+
-+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
-+ if (found_flags == 0) {
-+ struct elf_phdr phdr;
-+ memset(&phdr, 0, sizeof(phdr));
-+ phdr.p_flags = PF_NOEMUTRAMP;
-+#ifdef CONFIG_PAX_SOFTMODE
-+ if (pax_softmode)
-+ pax_flags = pax_parse_softmode(&phdr);
++ return ~0UL;
++}
++
++static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
++ pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
++ pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ if ((__supported_pte_mask & _PAGE_NX))
++ pax_flags &= ~MF_PAX_SEGMEXEC;
+ else
++ pax_flags &= ~MF_PAX_PAGEEXEC;
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
++ pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (pax_flags_softmode & MF_PAX_MPROTECT)
++ pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
++ pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++ return pax_flags;
++}
++
++static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
++ pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
++ pax_flags |= MF_PAX_SEGMEXEC;
+#endif
-+ pax_flags = pax_parse_hardmode(&phdr);
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ if ((__supported_pte_mask & _PAGE_NX))
++ pax_flags &= ~MF_PAX_SEGMEXEC;
++ else
++ pax_flags &= ~MF_PAX_PAGEEXEC;
+ }
+#endif
+
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
++ pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
++ pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
++ pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++ return pax_flags;
++}
++
++static unsigned long pax_parse_xattr_pax(struct file * const file)
++{
++
++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
++ ssize_t xattr_size, i;
++ unsigned char xattr_value[5];
++ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
++
++ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
++ if (xattr_size <= 0)
++ return ~0UL;
++
++ for (i = 0; i < xattr_size; i++)
++ switch (xattr_value[i]) {
++ default:
++ return ~0UL;
++
++#define parse_flag(option1, option2, flag) \
++ case option1: \
++ pax_flags_hardmode |= MF_PAX_##flag; \
++ break; \
++ case option2: \
++ pax_flags_softmode |= MF_PAX_##flag; \
++ break;
++
++ parse_flag('p', 'P', PAGEEXEC);
++ parse_flag('e', 'E', EMUTRAMP);
++ parse_flag('m', 'M', MPROTECT);
++ parse_flag('r', 'R', RANDMMAP);
++ parse_flag('s', 'S', SEGMEXEC);
++
++#undef parse_flag
++ }
++
++ if (pax_flags_hardmode & pax_flags_softmode)
++ return ~0UL;
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_softmode)
++ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
++ else
++#endif
++
++ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
++#else
++ return ~0UL;
++#endif
++}
++
++#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
++static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
++{
++ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
++
++ pax_flags = pax_parse_ei_pax(elf_ex);
++ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
++ xattr_pax_flags = pax_parse_xattr_pax(file);
++
++ if (pt_pax_flags == ~0UL)
++ pt_pax_flags = xattr_pax_flags;
++ else if (xattr_pax_flags == ~0UL)
++ xattr_pax_flags = pt_pax_flags;
++ if (pt_pax_flags != xattr_pax_flags)
++ return -EINVAL;
++ if (pt_pax_flags != ~0UL)
++ pax_flags = pt_pax_flags;
++
+ if (0 > pax_check_flags(&pax_flags))
+ return -EINVAL;
+
@@ -42235,7 +42467,7 @@ index 21ac5ee..171b1d0 100644
/*
* These are the functions used to load ELF style executables and shared
* libraries. There is no binary dependent code anywhere else.
-@@ -544,6 +756,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
+@@ -544,6 +912,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
{
unsigned int random_variable = 0;
@@ -42247,7 +42479,7 @@ index 21ac5ee..171b1d0 100644
if ((current->flags & PF_RANDOMIZE) &&
!(current->personality & ADDR_NO_RANDOMIZE)) {
random_variable = get_random_int() & STACK_RND_MASK;
-@@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -562,7 +935,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
unsigned long load_addr = 0, load_bias = 0;
int load_addr_set = 0;
char * elf_interpreter = NULL;
@@ -42256,7 +42488,7 @@ index 21ac5ee..171b1d0 100644
struct elf_phdr *elf_ppnt, *elf_phdata;
unsigned long elf_bss, elf_brk;
int retval, i;
-@@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -572,11 +945,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
unsigned long start_code, end_code, start_data, end_data;
unsigned long reloc_func_desc __maybe_unused = 0;
int executable_stack = EXSTACK_DEFAULT;
@@ -42269,7 +42501,7 @@ index 21ac5ee..171b1d0 100644
loc = kmalloc(sizeof(*loc), GFP_KERNEL);
if (!loc) {
-@@ -713,11 +930,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -713,11 +1086,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
/* OK, This is the point of no return */
current->flags &= ~PF_FORKNOEXEC;
@@ -42294,8 +42526,8 @@ index 21ac5ee..171b1d0 100644
+
+ current->mm->def_flags = 0;
+
-+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
-+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
++#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
++ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
+ send_sig(SIGKILL, current, 0);
+ goto out_free_dentry;
+ }
@@ -42352,7 +42584,7 @@ index 21ac5ee..171b1d0 100644
if (elf_read_implies_exec(loc->elf_ex, executable_stack))
current->personality |= READ_IMPLIES_EXEC;
-@@ -808,6 +1095,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -808,6 +1251,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
#else
load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
#endif
@@ -42373,7 +42605,7 @@ index 21ac5ee..171b1d0 100644
}
error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
-@@ -840,9 +1141,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -840,9 +1297,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
* allowed task size. Note that p_filesz must always be
* <= p_memsz so it is only necessary to check p_memsz.
*/
@@ -42386,7 +42618,7 @@ index 21ac5ee..171b1d0 100644
/* set_brk can never work. Avoid overflows. */
send_sig(SIGKILL, current, 0);
retval = -EINVAL;
-@@ -870,6 +1171,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -870,6 +1327,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
start_data += load_bias;
end_data += load_bias;
@@ -42398,7 +42630,7 @@ index 21ac5ee..171b1d0 100644
/* Calling set_brk effectively mmaps the pages that we need
* for the bss and break sections. We must do this before
* mapping in the interpreter, to make sure it doesn't wind
-@@ -881,9 +1187,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -881,9 +1343,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
goto out_free_dentry;
}
if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
@@ -42413,7 +42645,7 @@ index 21ac5ee..171b1d0 100644
}
if (elf_interpreter) {
-@@ -1098,7 +1406,7 @@ out:
+@@ -1098,7 +1562,7 @@ out:
* Decide what to dump of a segment, part, all or none.
*/
static unsigned long vma_dump_size(struct vm_area_struct *vma,
@@ -42422,7 +42654,7 @@ index 21ac5ee..171b1d0 100644
{
#define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
-@@ -1132,7 +1440,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
+@@ -1132,7 +1596,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
if (vma->vm_file == NULL)
return 0;
@@ -42431,7 +42663,7 @@ index 21ac5ee..171b1d0 100644
goto whole;
/*
-@@ -1354,9 +1662,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
+@@ -1354,9 +1818,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
{
elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
int i = 0;
@@ -42443,7 +42675,7 @@ index 21ac5ee..171b1d0 100644
fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
}
-@@ -1862,14 +2170,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
+@@ -1862,14 +2326,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
}
static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
@@ -42460,7 +42692,7 @@ index 21ac5ee..171b1d0 100644
return size;
}
-@@ -1963,7 +2271,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -1963,7 +2427,7 @@ static int elf_core_dump(struct coredump_params *cprm)
dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
@@ -42469,7 +42701,7 @@ index 21ac5ee..171b1d0 100644
offset += elf_core_extra_data_size();
e_shoff = offset;
-@@ -1977,10 +2285,12 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -1977,10 +2441,12 @@ static int elf_core_dump(struct coredump_params *cprm)
offset = dataoff;
size += sizeof(*elf);
@@ -42482,7 +42714,7 @@ index 21ac5ee..171b1d0 100644
if (size > cprm->limit
|| !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
goto end_coredump;
-@@ -1994,7 +2304,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -1994,7 +2460,7 @@ static int elf_core_dump(struct coredump_params *cprm)
phdr.p_offset = offset;
phdr.p_vaddr = vma->vm_start;
phdr.p_paddr = 0;
@@ -42491,7 +42723,7 @@ index 21ac5ee..171b1d0 100644
phdr.p_memsz = vma->vm_end - vma->vm_start;
offset += phdr.p_filesz;
phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
-@@ -2005,6 +2315,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2005,6 +2471,7 @@ static int elf_core_dump(struct coredump_params *cprm)
phdr.p_align = ELF_EXEC_PAGESIZE;
size += sizeof(phdr);
@@ -42499,7 +42731,7 @@ index 21ac5ee..171b1d0 100644
if (size > cprm->limit
|| !dump_write(cprm->file, &phdr, sizeof(phdr)))
goto end_coredump;
-@@ -2029,7 +2340,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2029,7 +2496,7 @@ static int elf_core_dump(struct coredump_params *cprm)
unsigned long addr;
unsigned long end;
@@ -42508,7 +42740,7 @@ index 21ac5ee..171b1d0 100644
for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
struct page *page;
-@@ -2038,6 +2349,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2038,6 +2505,7 @@ static int elf_core_dump(struct coredump_params *cprm)
page = get_dump_page(addr);
if (page) {
void *kaddr = kmap(page);
@@ -42516,7 +42748,7 @@ index 21ac5ee..171b1d0 100644
stop = ((size += PAGE_SIZE) > cprm->limit) ||
!dump_write(cprm->file, kaddr,
PAGE_SIZE);
-@@ -2055,6 +2367,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2055,6 +2523,7 @@ static int elf_core_dump(struct coredump_params *cprm)
if (e_phnum == PN_XNUM) {
size += sizeof(*shdr4extnum);
@@ -42524,7 +42756,7 @@ index 21ac5ee..171b1d0 100644
if (size > cprm->limit
|| !dump_write(cprm->file, shdr4extnum,
sizeof(*shdr4extnum)))
-@@ -2075,6 +2388,97 @@ out:
+@@ -2075,6 +2544,97 @@ out:
#endif /* CONFIG_ELF_CORE */
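
The binfmt_elf hunks above make load_elf_binary() call pax_parse_pax_flags() with the ELF header, the program headers and the file itself, so per-binary PaX markings (legacy EI_PAX bytes, a PT_PAX_FLAGS program header, or an extended attribute) are honoured before the binary is mapped. As background only, here is a minimal userspace sketch that scans a 64-bit ELF file for a PT_PAX_FLAGS program header; the PT_PAX_FLAGS value is an assumption taken from the PaX patch and is not an upstream ELF constant, and this is not the kernel's own parsing code.

/* Hypothetical sketch: look for a PT_PAX_FLAGS program header in a 64-bit
 * ELF file.  PT_PAX_FLAGS (0x65041580) is assumed from the PaX patch; the
 * rest uses only the standard <elf.h> types. */
#include <elf.h>
#include <stdio.h>
#include <string.h>

#ifndef PT_PAX_FLAGS
#define PT_PAX_FLAGS 0x65041580
#endif

int main(int argc, char **argv)
{
	Elf64_Ehdr eh;
	Elf64_Phdr ph;
	FILE *f;
	int i;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <elf-file>\n", argv[0]);
		return 1;
	}
	f = fopen(argv[1], "rb");
	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fread(&eh, sizeof(eh), 1, f) != 1 ||
	    memcmp(eh.e_ident, ELFMAG, SELFMAG) != 0 ||
	    eh.e_ident[EI_CLASS] != ELFCLASS64) {
		fprintf(stderr, "not a 64-bit ELF file\n");
		return 1;
	}
	for (i = 0; i < eh.e_phnum; i++) {
		if (fseek(f, (long)(eh.e_phoff + (Elf64_Off)i * eh.e_phentsize), SEEK_SET) != 0 ||
		    fread(&ph, sizeof(ph), 1, f) != 1)
			break;
		if (ph.p_type == PT_PAX_FLAGS) {
			/* p_flags carries the per-binary PaX control bits */
			printf("PT_PAX_FLAGS found, p_flags=0x%x\n", (unsigned)ph.p_flags);
			fclose(f);
			return 0;
		}
	}
	printf("no PT_PAX_FLAGS program header\n");
	fclose(f);
	return 0;
}
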
@@ -43569,10 +43801,10 @@ index 3745f7c..89cc7a3 100644
return rc;
}
diff --git a/fs/exec.c b/fs/exec.c
-index 25dcbe5..4ffaa78 100644
+index 25dcbe5..09c172c 100644
--- a/fs/exec.c
+++ b/fs/exec.c
-@@ -55,12 +55,24 @@
+@@ -55,12 +55,28 @@
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>
@@ -43589,6 +43821,10 @@ index 25dcbe5..4ffaa78 100644
#include <asm/tlb.h>
#include "internal.h"
++#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
++void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
++#endif
++
+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
+EXPORT_SYMBOL(pax_set_initial_flags_func);
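
The lines added just above give fs/exec.c a no-op __weak definition of pax_set_initial_flags() when CONFIG_PAX_HAVE_ACL_FLAGS is not set, so a stronger definition linked in elsewhere (for instance by a MAC system) silently replaces the stub. A minimal userspace sketch of the same GCC/Clang weak-symbol pattern follows; the kernel's __weak macro expands to this attribute, and pax_hook is an invented name used only for illustration.

#include <stdio.h>

/* Weak default: used only when no other object file provides a strong
 * definition of the same symbol at link time. */
void __attribute__((weak)) pax_hook(void)
{
	puts("weak default: doing nothing");
}

int main(void)
{
	pax_hook();	/* linking in a strong pax_hook() would replace the stub */
	return 0;
}
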
@@ -43597,7 +43833,7 @@ index 25dcbe5..4ffaa78 100644
int core_uses_pid;
char core_pattern[CORENAME_MAX_SIZE] = "core";
unsigned int core_pipe_limit;
-@@ -70,7 +82,7 @@ struct core_name {
+@@ -70,7 +86,7 @@ struct core_name {
char *corename;
int used, size;
};
@@ -43606,7 +43842,7 @@ index 25dcbe5..4ffaa78 100644
/* The maximal length of core_pattern is also specified in sysctl.c */
-@@ -188,18 +200,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
+@@ -188,18 +204,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
int write)
{
struct page *page;
@@ -43628,7 +43864,7 @@ index 25dcbe5..4ffaa78 100644
return NULL;
if (write) {
-@@ -274,6 +278,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
+@@ -274,6 +282,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
vma->vm_end = STACK_TOP_MAX;
vma->vm_start = vma->vm_end - PAGE_SIZE;
vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
@@ -43640,7 +43876,7 @@ index 25dcbe5..4ffaa78 100644
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
INIT_LIST_HEAD(&vma->anon_vma_chain);
-@@ -288,6 +297,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
+@@ -288,6 +301,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
mm->stack_vm = mm->total_vm = 1;
up_write(&mm->mmap_sem);
bprm->p = vma->vm_end - sizeof(void *);
@@ -43653,7 +43889,7 @@ index 25dcbe5..4ffaa78 100644
return 0;
err:
up_write(&mm->mmap_sem);
-@@ -396,19 +411,7 @@ err:
+@@ -396,19 +415,7 @@ err:
return err;
}
@@ -43674,7 +43910,7 @@ index 25dcbe5..4ffaa78 100644
{
const char __user *native;
-@@ -417,14 +420,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
+@@ -417,14 +424,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
compat_uptr_t compat;
if (get_user(compat, argv.ptr.compat + nr))
@@ -43691,7 +43927,7 @@ index 25dcbe5..4ffaa78 100644
return native;
}
-@@ -443,7 +446,7 @@ static int count(struct user_arg_ptr argv, int max)
+@@ -443,7 +450,7 @@ static int count(struct user_arg_ptr argv, int max)
if (!p)
break;
@@ -43700,7 +43936,7 @@ index 25dcbe5..4ffaa78 100644
return -EFAULT;
if (i++ >= max)
-@@ -477,7 +480,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
+@@ -477,7 +484,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
ret = -EFAULT;
str = get_user_arg_ptr(argv, argc);
@@ -43709,7 +43945,7 @@ index 25dcbe5..4ffaa78 100644
goto out;
len = strnlen_user(str, MAX_ARG_STRLEN);
-@@ -559,7 +562,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
+@@ -559,7 +566,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
int r;
mm_segment_t oldfs = get_fs();
struct user_arg_ptr argv = {
@@ -43718,7 +43954,7 @@ index 25dcbe5..4ffaa78 100644
};
set_fs(KERNEL_DS);
-@@ -594,7 +597,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
+@@ -594,7 +601,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
unsigned long new_end = old_end - shift;
struct mmu_gather tlb;
@@ -43728,7 +43964,7 @@ index 25dcbe5..4ffaa78 100644
/*
* ensure there are no vmas between where we want to go
-@@ -603,6 +607,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
+@@ -603,6 +611,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
if (vma != find_vma(mm, new_start))
return -EFAULT;
@@ -43739,7 +43975,7 @@ index 25dcbe5..4ffaa78 100644
/*
* cover the whole range: [new_start, old_end)
*/
-@@ -683,10 +691,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
+@@ -683,10 +695,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
stack_top = arch_align_stack(stack_top);
stack_top = PAGE_ALIGN(stack_top);
@@ -43750,7 +43986,7 @@ index 25dcbe5..4ffaa78 100644
stack_shift = vma->vm_end - stack_top;
bprm->p -= stack_shift;
-@@ -698,8 +702,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
+@@ -698,8 +706,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
bprm->exec -= stack_shift;
down_write(&mm->mmap_sem);
@@ -43779,7 +44015,7 @@ index 25dcbe5..4ffaa78 100644
/*
* Adjust stack execute permissions; explicitly enable for
* EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
-@@ -718,13 +742,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
+@@ -718,13 +746,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
goto out_unlock;
BUG_ON(prev != vma);
@@ -43793,7 +44029,7 @@ index 25dcbe5..4ffaa78 100644
/* mprotect_fixup is overkill to remove the temporary stack flags */
vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
-@@ -805,7 +822,7 @@ int kernel_read(struct file *file, loff_t offset,
+@@ -805,7 +826,7 @@ int kernel_read(struct file *file, loff_t offset,
old_fs = get_fs();
set_fs(get_ds());
/* The cast to a user pointer is valid due to the set_fs() */
@@ -43802,7 +44038,7 @@ index 25dcbe5..4ffaa78 100644
set_fs(old_fs);
return result;
}
-@@ -1251,7 +1268,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
+@@ -1251,7 +1272,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
}
rcu_read_unlock();
@@ -43811,7 +44047,7 @@ index 25dcbe5..4ffaa78 100644
bprm->unsafe |= LSM_UNSAFE_SHARE;
} else {
res = -EAGAIN;
-@@ -1454,6 +1471,11 @@ static int do_execve_common(const char *filename,
+@@ -1454,6 +1475,11 @@ static int do_execve_common(const char *filename,
struct user_arg_ptr envp,
struct pt_regs *regs)
{
@@ -43823,7 +44059,7 @@ index 25dcbe5..4ffaa78 100644
struct linux_binprm *bprm;
struct file *file;
struct files_struct *displaced;
-@@ -1461,6 +1483,8 @@ static int do_execve_common(const char *filename,
+@@ -1461,6 +1487,8 @@ static int do_execve_common(const char *filename,
int retval;
const struct cred *cred = current_cred();
@@ -43832,7 +44068,7 @@ index 25dcbe5..4ffaa78 100644
/*
* We move the actual failure in case of RLIMIT_NPROC excess from
* set*uid() to execve() because too many poorly written programs
-@@ -1507,6 +1531,16 @@ static int do_execve_common(const char *filename,
+@@ -1507,6 +1535,16 @@ static int do_execve_common(const char *filename,
bprm->filename = filename;
bprm->interp = filename;
@@ -43849,7 +44085,7 @@ index 25dcbe5..4ffaa78 100644
retval = bprm_mm_init(bprm);
if (retval)
goto out_file;
-@@ -1536,9 +1570,40 @@ static int do_execve_common(const char *filename,
+@@ -1536,9 +1574,40 @@ static int do_execve_common(const char *filename,
if (retval < 0)
goto out;
@@ -43891,7 +44127,7 @@ index 25dcbe5..4ffaa78 100644
/* execve succeeded */
current->fs->in_exec = 0;
-@@ -1549,6 +1614,14 @@ static int do_execve_common(const char *filename,
+@@ -1549,6 +1618,14 @@ static int do_execve_common(const char *filename,
put_files_struct(displaced);
return retval;
@@ -43906,7 +44142,7 @@ index 25dcbe5..4ffaa78 100644
out:
if (bprm->mm) {
acct_arg_size(bprm, 0);
-@@ -1622,7 +1695,7 @@ static int expand_corename(struct core_name *cn)
+@@ -1622,7 +1699,7 @@ static int expand_corename(struct core_name *cn)
{
char *old_corename = cn->corename;
@@ -43915,7 +44151,7 @@ index 25dcbe5..4ffaa78 100644
cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
if (!cn->corename) {
-@@ -1719,7 +1792,7 @@ static int format_corename(struct core_name *cn, long signr)
+@@ -1719,7 +1796,7 @@ static int format_corename(struct core_name *cn, long signr)
int pid_in_pattern = 0;
int err = 0;
@@ -43924,7 +44160,7 @@ index 25dcbe5..4ffaa78 100644
cn->corename = kmalloc(cn->size, GFP_KERNEL);
cn->used = 0;
-@@ -1816,6 +1889,218 @@ out:
+@@ -1816,6 +1893,218 @@ out:
return ispipe;
}
@@ -44143,7 +44379,7 @@ index 25dcbe5..4ffaa78 100644
static int zap_process(struct task_struct *start, int exit_code)
{
struct task_struct *t;
-@@ -2027,17 +2312,17 @@ static void wait_for_dump_helpers(struct file *file)
+@@ -2027,17 +2316,17 @@ static void wait_for_dump_helpers(struct file *file)
pipe = file->f_path.dentry->d_inode->i_pipe;
pipe_lock(pipe);
@@ -44166,7 +44402,7 @@ index 25dcbe5..4ffaa78 100644
pipe_unlock(pipe);
}
-@@ -2098,7 +2383,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+@@ -2098,7 +2387,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
int retval = 0;
int flag = 0;
int ispipe;
@@ -44175,7 +44411,7 @@ index 25dcbe5..4ffaa78 100644
struct coredump_params cprm = {
.signr = signr,
.regs = regs,
-@@ -2113,6 +2398,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+@@ -2113,6 +2402,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
audit_core_dumps(signr);
@@ -44185,7 +44421,7 @@ index 25dcbe5..4ffaa78 100644
binfmt = mm->binfmt;
if (!binfmt || !binfmt->core_dump)
goto fail;
-@@ -2180,7 +2468,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+@@ -2180,7 +2472,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
}
cprm.limit = RLIM_INFINITY;
@@ -44194,7 +44430,7 @@ index 25dcbe5..4ffaa78 100644
if (core_pipe_limit && (core_pipe_limit < dump_count)) {
printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
task_tgid_vnr(current), current->comm);
-@@ -2207,6 +2495,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+@@ -2207,6 +2499,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
} else {
struct inode *inode;
@@ -44203,7 +44439,7 @@ index 25dcbe5..4ffaa78 100644
if (cprm.limit < binfmt->min_coredump)
goto fail_unlock;
-@@ -2250,7 +2540,7 @@ close_fail:
+@@ -2250,7 +2544,7 @@ close_fail:
filp_close(cprm.file, NULL);
fail_dropcount:
if (ispipe)
@@ -44212,7 +44448,7 @@ index 25dcbe5..4ffaa78 100644
fail_unlock:
kfree(cn.corename);
fail_corename:
-@@ -2269,7 +2559,7 @@ fail:
+@@ -2269,7 +2563,7 @@ fail:
*/
int dump_write(struct file *file, const void *addr, int nr)
{
@@ -47255,7 +47491,7 @@ index acf88ae..4fd6245 100644
if (host_err < 0)
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
-index 41d6743..b805df9 100644
+index 3e65427..ac258be 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -625,6 +625,9 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
@@ -60105,10 +60341,10 @@ index fd88a39..f4d0bad 100644
};
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
-index 7fbaa91..5e6a460 100644
+index 5e30b45..5fdcf66 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
-@@ -1321,7 +1321,7 @@ struct block_device_operations {
+@@ -1318,7 +1318,7 @@ struct block_device_operations {
/* this callback is with swap_lock and sometimes page table lock held */
void (*swap_slot_free_notify) (struct block_device *, unsigned long);
struct module *owner;
@@ -60406,6 +60642,32 @@ index e9eaec5..bfeb9bb 100644
}
static inline void set_mems_allowed(nodemask_t nodemask)
+diff --git a/include/linux/cred.h b/include/linux/cred.h
+index 4030896..a5c9f09 100644
+--- a/include/linux/cred.h
++++ b/include/linux/cred.h
+@@ -196,6 +196,11 @@ do { \
+ __validate_process_creds(current, __FILE__, __LINE__); \
+ } while(0)
+
++#define validate_task_creds(task) \
++do { \
++ __validate_process_creds((task), __FILE__, __LINE__); \
++} while(0)
++
+ extern void validate_creds_for_do_exit(struct task_struct *);
+ #else
+ static inline void validate_creds(const struct cred *cred)
+@@ -207,6 +212,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
+ static inline void validate_process_creds(void)
+ {
+ }
++static inline void validate_task_creds(struct task_struct *task)
++{
++}
+ #endif
+
+ /**
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index e5e468e..f079672 100644
--- a/include/linux/crypto.h
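
The cred.h hunk a few lines above wraps __validate_process_creds() in a validate_task_creds(task) macro built on the usual do { ... } while (0) idiom, which makes a multi-statement macro behave like a single statement inside if/else bodies. A small illustration of that idiom is sketched below; the log_and_count macro is invented for the example and has nothing to do with the kernel's credential code.

#include <stdio.h>

static int error_count;

/* do { } while (0) lets the expansion take a trailing semicolon and nest
 * safely under if/else without an accidental dangling-else. */
#define log_and_count(msg)				\
do {							\
	fprintf(stderr, "error: %s\n", (msg));		\
	error_count++;					\
} while (0)

int main(void)
{
	if (error_count == 0)
		log_and_count("first failure");	/* expands safely here */
	else
		puts("already failed");
	return error_count ? 1 : 0;
}
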
@@ -60647,6 +60909,18 @@ index 2a53f10..0187fdf 100644
}
/*
+diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
+index 91d0e0a3..035666b 100644
+--- a/include/linux/fsnotify_backend.h
++++ b/include/linux/fsnotify_backend.h
+@@ -105,6 +105,7 @@ struct fsnotify_ops {
+ void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
+ void (*free_event_priv)(struct fsnotify_event_private_data *priv);
+ };
++typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
+
+ /*
+ * A group is a "thing" that wants to receive notification about filesystem
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 96efa67..1261547 100644
--- a/include/linux/ftrace_event.h
@@ -62190,7 +62464,7 @@ index fedc5f0..7cedb6d 100644
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
-index 10a2f62..c8fa287 100644
+index 10a2f62..d655142 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -230,6 +230,8 @@ struct vm_area_struct {
@@ -62207,7 +62481,7 @@ index 10a2f62..c8fa287 100644
struct cpumask cpumask_allocation;
#endif
+
-+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
+ unsigned long pax_flags;
+#endif
+
@@ -63716,6 +63990,22 @@ index 65efb92..137adbb 100644
}
static inline void __dec_zone_page_state(struct page *page,
+diff --git a/include/linux/xattr.h b/include/linux/xattr.h
+index aed54c5..3e07f7a 100644
+--- a/include/linux/xattr.h
++++ b/include/linux/xattr.h
+@@ -49,6 +49,11 @@
+ #define XATTR_CAPS_SUFFIX "capability"
+ #define XATTR_NAME_CAPS XATTR_SECURITY_PREFIX XATTR_CAPS_SUFFIX
+
++/* User namespace */
++#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
++#define XATTR_PAX_FLAGS_SUFFIX "flags"
++#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
++
+ #ifdef __KERNEL__
+
+ #include <linux/types.h>
diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
index 4aeff96..b378cdc 100644
--- a/include/media/saa7146_vv.h
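
The xattr.h hunk above defines XATTR_NAME_PAX_FLAGS, which expands to the "user.pax.flags" attribute consulted by the new CONFIG_PAX_XATTR_PAX_FLAGS path in binfmt_elf. A hedged userspace sketch using the standard setxattr(2)/getxattr(2) calls follows, roughly what setfattr -n user.pax.flags does; the value "pm" is purely illustrative, the flag letters actually accepted are defined by the PaX patch and are not shown here.

/* Sketch: write and read back user.pax.flags on a file.  Fails on
 * filesystems without user xattr support (e.g. vfat, isofs). */
#include <sys/xattr.h>
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	char buf[32];
	ssize_t len;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	if (setxattr(argv[1], "user.pax.flags", "pm", strlen("pm"), 0) != 0) {
		perror("setxattr");
		return 1;
	}
	len = getxattr(argv[1], "user.pax.flags", buf, sizeof(buf) - 1);
	if (len < 0) {
		perror("getxattr");
		return 1;
	}
	buf[len] = '\0';
	printf("user.pax.flags = %s\n", buf);
	return 0;
}
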
@@ -63801,13 +64091,13 @@ index 9e5425b..8136ffc 100644
/* Protects from simultaneous access to first_req list */
spinlock_t info_list_lock;
diff --git a/include/net/flow.h b/include/net/flow.h
-index a094477..bc91db1 100644
+index 57f15a7..0de26c6 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
-@@ -207,6 +207,6 @@ extern struct flow_cache_object *flow_cache_lookup(
- u8 dir, flow_resolve_t resolver, void *ctx);
+@@ -208,6 +208,6 @@ extern struct flow_cache_object *flow_cache_lookup(
extern void flow_cache_flush(void);
+ extern void flow_cache_flush_deferred(void);
-extern atomic_t flow_cache_genid;
+extern atomic_unchecked_t flow_cache_genid;
@@ -63996,28 +64286,6 @@ index 6a72a58..e6a127d 100644
#define SCTP_ENABLE_DEBUG
#define SCTP_DISABLE_DEBUG
#define SCTP_ASSERT(expr, str, func)
-diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
-index f7d9c3f..ec86952 100644
---- a/include/net/sctp/structs.h
-+++ b/include/net/sctp/structs.h
-@@ -241,6 +241,9 @@ extern struct sctp_globals {
- * bits is an indicator of when to send and window update SACK.
- */
- int rwnd_update_shift;
-+
-+ /* Threshold for autoclose timeout, in seconds. */
-+ unsigned long max_autoclose;
- } sctp_globals;
-
- #define sctp_rto_initial (sctp_globals.rto_initial)
-@@ -281,6 +284,7 @@ extern struct sctp_globals {
- #define sctp_auth_enable (sctp_globals.auth_enable)
- #define sctp_checksum_disable (sctp_globals.checksum_disable)
- #define sctp_rwnd_upd_shift (sctp_globals.rwnd_update_shift)
-+#define sctp_max_autoclose (sctp_globals.max_autoclose)
-
- /* SCTP Socket type: UDP or TCP style. */
- typedef enum {
diff --git a/include/net/sock.h b/include/net/sock.h
index 8e4062f..77b041e 100644
--- a/include/net/sock.h
@@ -65074,7 +65342,7 @@ index 283c529..36ac81e 100644
* nsown_capable - Check superior capability to one's own user_ns
* @cap: The capability in question
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
-index 1d2b6ce..87bf267 100644
+index b7ab0b8..b3a88d2 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -595,6 +595,8 @@ static struct css_set *find_css_set(
@@ -65280,7 +65548,7 @@ index 42e8fa0..9e7406b 100644
return -ENOMEM;
diff --git a/kernel/cred.c b/kernel/cred.c
-index 8ef31f5..d7d50d8 100644
+index 8ef31f5..a4a483a 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head *rcu)
@@ -65319,7 +65587,7 @@ index 8ef31f5..d7d50d8 100644
new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
if (!new)
return NULL;
-@@ -281,12 +289,14 @@ error:
+@@ -281,13 +289,15 @@ error:
*
* Call commit_creds() or abort_creds() to clean up.
*/
@@ -65331,11 +65599,13 @@ index 8ef31f5..d7d50d8 100644
const struct cred *old;
struct cred *new;
+- validate_process_creds();
+ pax_track_stack();
+
- validate_process_creds();
++ validate_task_creds(task);
new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
+ if (!new)
@@ -322,6 +332,11 @@ error:
abort_creds(new);
return NULL;
@@ -65390,11 +65660,25 @@ index 8ef31f5..d7d50d8 100644
/* dumpability changes */
if (old->euid != new->euid ||
old->egid != new->egid ||
-@@ -538,6 +560,64 @@ int commit_creds(struct cred *new)
+@@ -538,6 +560,87 @@ int commit_creds(struct cred *new)
put_cred(old);
return 0;
}
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++static int set_task_user(struct user_namespace *user_ns, struct cred *new)
++{
++ struct user_struct *new_user;
++
++ new_user = alloc_uid(user_ns, new->uid);
++ if (!new_user)
++ return -EAGAIN;
++ free_uid(new->user);
++ new->user = new_user;
++ return 0;
++}
++#endif
++
+int commit_creds(struct cred *new)
+{
+#ifdef CONFIG_GRKERNSEC_SETXID
@@ -65402,6 +65686,10 @@ index 8ef31f5..d7d50d8 100644
+ struct cred *ncred;
+ const struct cred *old;
+
++ /* we won't get called with tasklist_lock held for writing
++ and interrupts disabled as the cred struct in that case is
++ init_cred
++ */
+ if (grsec_enable_setxid && !current_is_single_threaded() &&
+ !current_uid() && new->uid) {
+ rcu_read_lock();
@@ -65436,6 +65724,11 @@ index 8ef31f5..d7d50d8 100644
+ ncred->cap_effective = new->cap_effective;
+ ncred->cap_bset = new->cap_bset;
+
++ if (set_task_user(old->user_ns, ncred)) {
++ abort_creds(ncred);
++ goto die;
++ }
++
+ __commit_creds(t, ncred);
+ }
+ read_unlock(&tasklist_lock);
@@ -65455,7 +65748,7 @@ index 8ef31f5..d7d50d8 100644
EXPORT_SYMBOL(commit_creds);
/**
-@@ -549,6 +629,8 @@ EXPORT_SYMBOL(commit_creds);
+@@ -549,6 +652,8 @@ EXPORT_SYMBOL(commit_creds);
*/
void abort_creds(struct cred *new)
{
@@ -65464,7 +65757,7 @@ index 8ef31f5..d7d50d8 100644
kdebug("abort_creds(%p{%d,%d})", new,
atomic_read(&new->usage),
read_cred_subscribers(new));
-@@ -572,6 +654,8 @@ const struct cred *override_creds(const struct cred *new)
+@@ -572,6 +677,8 @@ const struct cred *override_creds(const struct cred *new)
{
const struct cred *old = current->cred;
@@ -65473,7 +65766,7 @@ index 8ef31f5..d7d50d8 100644
kdebug("override_creds(%p{%d,%d})", new,
atomic_read(&new->usage),
read_cred_subscribers(new));
-@@ -601,6 +685,8 @@ void revert_creds(const struct cred *old)
+@@ -601,6 +708,8 @@ void revert_creds(const struct cred *old)
{
const struct cred *override = current->cred;
@@ -65482,7 +65775,7 @@ index 8ef31f5..d7d50d8 100644
kdebug("revert_creds(%p{%d,%d})", old,
atomic_read(&old->usage),
read_cred_subscribers(old));
-@@ -647,6 +733,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
+@@ -647,6 +756,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
const struct cred *old;
struct cred *new;
@@ -65491,7 +65784,7 @@ index 8ef31f5..d7d50d8 100644
new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
if (!new)
return NULL;
-@@ -701,6 +789,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
+@@ -701,6 +812,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
*/
int set_security_override(struct cred *new, u32 secid)
{
@@ -65500,7 +65793,7 @@ index 8ef31f5..d7d50d8 100644
return security_kernel_act_as(new, secid);
}
EXPORT_SYMBOL(set_security_override);
-@@ -720,6 +810,8 @@ int set_security_override_from_ctx(struct cred *new, const char *secctx)
+@@ -720,6 +833,8 @@ int set_security_override_from_ctx(struct cred *new, const char *secctx)
u32 secid;
int ret;
@@ -65695,7 +65988,7 @@ index 0f85778..0d43716 100644
/*
diff --git a/kernel/exit.c b/kernel/exit.c
-index 2913b35..4465c81 100644
+index 9e316ae..b3656d5 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -57,6 +57,10 @@
@@ -66065,7 +66358,7 @@ index 8e6b6f4..9dccf00 100644
else
new_fs = fs;
diff --git a/kernel/futex.c b/kernel/futex.c
-index 11cbe05..c5dab58 100644
+index e6160fa..edf9565 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -54,6 +54,7 @@
@@ -66088,7 +66381,7 @@ index 11cbe05..c5dab58 100644
/*
* The futex address must be "naturally" aligned.
*/
-@@ -1863,6 +1869,8 @@ static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
+@@ -1875,6 +1881,8 @@ static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
struct futex_q q = futex_q_init;
int ret;
@@ -66097,7 +66390,7 @@ index 11cbe05..c5dab58 100644
if (!bitset)
return -EINVAL;
q.bitset = bitset;
-@@ -2259,6 +2267,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+@@ -2271,6 +2279,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
struct futex_q q = futex_q_init;
int res, ret;
@@ -66106,7 +66399,7 @@ index 11cbe05..c5dab58 100644
if (!bitset)
return -EINVAL;
-@@ -2447,6 +2457,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
+@@ -2459,6 +2469,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
if (!p)
goto err_unlock;
ret = -EPERM;
@@ -66117,7 +66410,7 @@ index 11cbe05..c5dab58 100644
pcred = __task_cred(p);
/* If victim is in different user_ns, then uids are not
comparable, so we must have CAP_SYS_PTRACE */
-@@ -2712,6 +2726,7 @@ static int __init futex_init(void)
+@@ -2724,6 +2738,7 @@ static int __init futex_init(void)
{
u32 curval;
int i;
@@ -66125,7 +66418,7 @@ index 11cbe05..c5dab58 100644
/*
* This will fail and we want it. Some arch implementations do
-@@ -2723,8 +2738,11 @@ static int __init futex_init(void)
+@@ -2735,8 +2750,11 @@ static int __init futex_init(void)
* implementation, the non-functional ones will return
* -ENOSYS.
*/
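
The hunks above touch kernel/futex.c's wait paths, get_robust_list() and futex_init(). As background on the interface that file implements, here is a hedged userspace sketch of the basic futex(2) wait/wake primitive, issued through the raw syscall since glibc ships no wrapper; build with -pthread. It is only an illustration of FUTEX_WAIT/FUTEX_WAKE, not of the changes in this patch.

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <errno.h>

static uint32_t word;	/* 0 = not ready, 1 = ready */

static long futex(uint32_t *uaddr, int op, uint32_t val)
{
	return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

static void *waker(void *arg)
{
	(void)arg;
	sleep(1);
	__atomic_store_n(&word, 1, __ATOMIC_SEQ_CST);
	futex(&word, FUTEX_WAKE, 1);	/* wake at most one sleeping waiter */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waker, NULL);
	/* FUTEX_WAIT sleeps only while *uaddr still equals the expected value,
	 * so re-check the word after every wakeup or EAGAIN. */
	while (__atomic_load_n(&word, __ATOMIC_SEQ_CST) == 0)
		if (futex(&word, FUTEX_WAIT, 0) == -1 &&
		    errno != EAGAIN && errno != EINTR)
			break;
	puts("woken");
	pthread_join(t, NULL);
	return 0;
}
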
@@ -67759,10 +68052,10 @@ index 961b389..c451353 100644
}
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
-index a70d2a5..cbd4b4f 100644
+index 67d1fdd..1af21e2 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
-@@ -161,7 +161,8 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
+@@ -172,7 +172,8 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
return ret;
}
@@ -67772,7 +68065,7 @@ index a70d2a5..cbd4b4f 100644
{
const struct cred *cred = current_cred(), *tcred;
-@@ -187,7 +188,8 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
+@@ -198,7 +199,8 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
cred->gid == tcred->sgid &&
cred->gid == tcred->gid))
goto ok;
@@ -67782,7 +68075,7 @@ index a70d2a5..cbd4b4f 100644
goto ok;
rcu_read_unlock();
return -EPERM;
-@@ -196,7 +198,9 @@ ok:
+@@ -207,7 +209,9 @@ ok:
smp_rmb();
if (task->mm)
dumpable = get_dumpable(task->mm);
@@ -67793,7 +68086,7 @@ index a70d2a5..cbd4b4f 100644
return -EPERM;
return security_ptrace_access_check(task, mode);
-@@ -206,7 +210,16 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
+@@ -217,7 +221,16 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
int err;
task_lock(task);
@@ -67811,7 +68104,7 @@ index a70d2a5..cbd4b4f 100644
task_unlock(task);
return !err;
}
-@@ -251,7 +264,7 @@ static int ptrace_attach(struct task_struct *task, long request,
+@@ -262,7 +275,7 @@ static int ptrace_attach(struct task_struct *task, long request,
goto out;
task_lock(task);
@@ -67820,7 +68113,7 @@ index a70d2a5..cbd4b4f 100644
task_unlock(task);
if (retval)
goto unlock_creds;
-@@ -266,7 +279,7 @@ static int ptrace_attach(struct task_struct *task, long request,
+@@ -277,7 +290,7 @@ static int ptrace_attach(struct task_struct *task, long request,
task->ptrace = PT_PTRACED;
if (seize)
task->ptrace |= PT_SEIZED;
@@ -67829,7 +68122,7 @@ index a70d2a5..cbd4b4f 100644
task->ptrace |= PT_PTRACE_CAP;
__ptrace_link(task, current);
-@@ -461,6 +474,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
+@@ -472,6 +485,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
{
int copied = 0;
@@ -67838,7 +68131,7 @@ index a70d2a5..cbd4b4f 100644
while (len > 0) {
char buf[128];
int this_len, retval;
-@@ -472,7 +487,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
+@@ -483,7 +498,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
break;
return -EIO;
}
@@ -67847,7 +68140,7 @@ index a70d2a5..cbd4b4f 100644
return -EFAULT;
copied += retval;
src += retval;
-@@ -486,6 +501,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
+@@ -497,6 +512,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
{
int copied = 0;
@@ -67856,7 +68149,7 @@ index a70d2a5..cbd4b4f 100644
while (len > 0) {
char buf[128];
int this_len, retval;
-@@ -669,10 +686,12 @@ int ptrace_request(struct task_struct *child, long request,
+@@ -680,10 +697,12 @@ int ptrace_request(struct task_struct *child, long request,
bool seized = child->ptrace & PT_SEIZED;
int ret = -EIO;
siginfo_t siginfo, *si;
@@ -67870,7 +68163,7 @@ index a70d2a5..cbd4b4f 100644
switch (request) {
case PTRACE_PEEKTEXT:
case PTRACE_PEEKDATA:
-@@ -871,14 +890,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
+@@ -882,14 +901,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
goto out;
}
@@ -67893,7 +68186,7 @@ index a70d2a5..cbd4b4f 100644
goto out_put_task_struct;
}
-@@ -904,7 +930,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
+@@ -915,7 +941,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
if (copied != sizeof(tmp))
return -EIO;
@@ -67902,7 +68195,7 @@ index a70d2a5..cbd4b4f 100644
}
int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
-@@ -927,6 +953,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
+@@ -938,6 +964,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
siginfo_t siginfo;
int ret;
@@ -67911,7 +68204,7 @@ index a70d2a5..cbd4b4f 100644
switch (request) {
case PTRACE_PEEKTEXT:
case PTRACE_PEEKDATA:
-@@ -1014,14 +1042,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
+@@ -1025,14 +1053,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
goto out;
}
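
The kernel/ptrace.c hunks above adjust the access-check and data-transfer paths named in their headers (ptrace_check_attach(), __ptrace_may_access(), ptrace_attach(), ptrace_readdata()/writedata() and the syscall entry points). For context only, a hedged userspace sketch of the ptrace(2) calls those paths service: attach to a PID, read one word of its memory, detach. The address argument is a placeholder supplied on the command line; this does not reproduce the grsecurity checks themselves.

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

int main(int argc, char **argv)
{
	pid_t pid;
	void *addr;
	long word;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <pid> <hex-addr>\n", argv[0]);
		return 1;
	}
	pid = (pid_t)atoi(argv[1]);
	addr = (void *)strtoul(argv[2], NULL, 16);

	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1) {
		perror("PTRACE_ATTACH");	/* access checks are enforced here */
		return 1;
	}
	waitpid(pid, NULL, 0);			/* wait for the tracee to stop */

	errno = 0;
	word = ptrace(PTRACE_PEEKDATA, pid, addr, NULL);
	if (errno)
		perror("PTRACE_PEEKDATA");
	else
		printf("word at %p: 0x%lx\n", addr, word);

	ptrace(PTRACE_DETACH, pid, NULL, NULL);
	return 0;
}
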
@@ -68500,7 +68793,7 @@ index bc8ee99..b6f6492 100644
int this_cpu = smp_processor_id();
struct rq *this_rq = cpu_rq(this_cpu);
diff --git a/kernel/signal.c b/kernel/signal.c
-index 291c970..304bd03 100644
+index 195331c..e89634ce 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cachep;
@@ -68618,7 +68911,7 @@ index 291c970..304bd03 100644
memset(&info, 0, sizeof info);
info.si_signo = signr;
info.si_code = exit_code;
-@@ -2748,7 +2773,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
+@@ -2746,7 +2771,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
int error = -ESRCH;
rcu_read_lock();
@@ -69076,7 +69369,7 @@ index 11d65b5..6957b37 100644
EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
EXPORT_SYMBOL(register_sysctl_table);
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
-index e8bffbe..82bf0a4 100644
+index 2ce1b30..82bf0a4 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
@@ -69142,15 +69435,6 @@ index e8bffbe..82bf0a4 100644
set_fs(old_fs);
if (result < 0)
goto out;
-@@ -1354,7 +1354,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
-
- fput(file);
- out_putname:
-- putname(pathname);
-+ __putname(pathname);
- out:
- return result;
- }
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index 362da65..ab8ef8c 100644
--- a/kernel/sysctl_check.c
@@ -69992,7 +70276,7 @@ index f2f1ca1..0645f06 100644
from userspace allocation. Keeping a user from writing to low pages
can help reduce the impact of kernel NULL pointer bugs.
diff --git a/mm/filemap.c b/mm/filemap.c
-index 7771871..91bcdb4 100644
+index b91f3aa..d0ac1d4 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1784,7 +1784,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
@@ -70004,7 +70288,7 @@ index 7771871..91bcdb4 100644
file_accessed(file);
vma->vm_ops = &generic_file_vm_ops;
vma->vm_flags |= VM_CAN_NONLINEAR;
-@@ -2190,6 +2190,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
+@@ -2187,6 +2187,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
*pos = i_size_read(inode);
if (limit != RLIM_INFINITY) {
@@ -70080,7 +70364,7 @@ index d819d93..468e18f 100644
cond_resched();
}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
-index 73f17c0..fef0140 100644
+index 2316840..b418671 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2347,6 +2347,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -70952,14 +71236,10 @@ index b2b8731..6080174 100644
* Make sure the vDSO gets into every core dump.
* Dumping its contents makes post-mortem fully interpretable later
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
-index 9c51f9f..f2b1c49 100644
+index 2775fd0..f2b1c49 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
-@@ -636,20 +636,33 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
- struct vm_area_struct *prev;
- struct vm_area_struct *vma;
- int err = 0;
-+ pgoff_t pgoff;
+@@ -640,6 +640,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
unsigned long vmstart;
unsigned long vmend;
@@ -70970,27 +71250,7 @@ index 9c51f9f..f2b1c49 100644
vma = find_vma_prev(mm, start, &prev);
if (!vma || vma->vm_start > start)
return -EFAULT;
-
-+ if (start > vma->vm_start)
-+ prev = vma;
-+
- for (; vma && vma->vm_start < end; prev = vma, vma = next) {
- next = vma->vm_next;
- vmstart = max(start, vma->vm_start);
- vmend = min(end, vma->vm_end);
-
-+ if (mpol_equal(vma_policy(vma), new_pol))
-+ continue;
-+
-+ pgoff = vma->vm_pgoff +
-+ ((vmstart - vma->vm_start) >> PAGE_SHIFT);
- prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
-- vma->anon_vma, vma->vm_file, vma->vm_pgoff,
-+ vma->anon_vma, vma->vm_file, pgoff,
- new_pol);
- if (prev) {
- vma = prev;
-@@ -669,6 +682,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
+@@ -678,6 +682,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
err = policy_vma(vma, new_pol);
if (err)
goto out;
@@ -71007,7 +71267,7 @@ index 9c51f9f..f2b1c49 100644
}
out:
-@@ -1102,6 +1125,17 @@ static long do_mbind(unsigned long start, unsigned long len,
+@@ -1111,6 +1125,17 @@ static long do_mbind(unsigned long start, unsigned long len,
if (end < start)
return -EINVAL;
@@ -71025,7 +71285,7 @@ index 9c51f9f..f2b1c49 100644
if (end == start)
return 0;
-@@ -1320,6 +1354,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
+@@ -1329,6 +1354,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
if (!mm)
goto out;
@@ -71040,7 +71300,7 @@ index 9c51f9f..f2b1c49 100644
/*
* Check if this process has the right to modify the specified
* process. The right exists if the process has administrative
-@@ -1329,8 +1371,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
+@@ -1338,8 +1371,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
rcu_read_lock();
tcred = __task_cred(task);
if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
@@ -72849,19 +73109,6 @@ index 4358032..e79b99f 100644
*region = *vma->vm_region;
new->vm_region = region;
-diff --git a/mm/oom_kill.c b/mm/oom_kill.c
-index 626303b..e9a1785 100644
---- a/mm/oom_kill.c
-+++ b/mm/oom_kill.c
-@@ -162,7 +162,7 @@ static bool oom_unkillable_task(struct task_struct *p,
- unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
- const nodemask_t *nodemask, unsigned long totalpages)
- {
-- int points;
-+ long points;
-
- if (oom_unkillable_task(p, mem, nodemask))
- return 0;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e8fae15..18c0442 100644
--- a/mm/page_alloc.c
@@ -72934,7 +73181,7 @@ index e8fae15..18c0442 100644
}
return 0;
diff --git a/mm/percpu.c b/mm/percpu.c
-index 93b5a7c..28d642c 100644
+index 0ae7a09..613118e 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -121,7 +121,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
@@ -73616,7 +73863,7 @@ index bf39181..727f7a3 100644
EXPORT_SYMBOL(kmem_cache_free);
diff --git a/mm/slub.c b/mm/slub.c
-index 7c54fe8..0bb4ac5 100644
+index 7c54fe8..ce9940d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -208,7 +208,7 @@ struct track {
@@ -73637,7 +73884,19 @@ index 7c54fe8..0bb4ac5 100644
s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
#ifdef CONFIG_STACKTRACE
{
-@@ -2456,6 +2456,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
+@@ -2077,6 +2077,11 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+ goto new_slab;
+ }
+
++ /* must check again c->freelist in case of cpu migration or IRQ */
++ object = c->freelist;
++ if (object)
++ goto load_freelist;
++
+ stat(s, ALLOC_SLOWPATH);
+
+ do {
+@@ -2456,6 +2461,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
page = virt_to_head_page(x);
@@ -73646,7 +73905,7 @@ index 7c54fe8..0bb4ac5 100644
slab_free(s, page, x, _RET_IP_);
trace_kmem_cache_free(_RET_IP_, x);
-@@ -2489,7 +2491,7 @@ static int slub_min_objects;
+@@ -2489,7 +2496,7 @@ static int slub_min_objects;
* Merge control. If this is set then no merging of slab caches will occur.
* (Could be removed. This was introduced to pacify the merge skeptics.)
*/
@@ -73655,7 +73914,7 @@ index 7c54fe8..0bb4ac5 100644
/*
* Calculate the order of allocation given an slab object size.
-@@ -2912,7 +2914,7 @@ static int kmem_cache_open(struct kmem_cache *s,
+@@ -2912,7 +2919,7 @@ static int kmem_cache_open(struct kmem_cache *s,
* list to avoid pounding the page allocator excessively.
*/
set_min_partial(s, ilog2(s->size));
@@ -73664,7 +73923,7 @@ index 7c54fe8..0bb4ac5 100644
#ifdef CONFIG_NUMA
s->remote_node_defrag_ratio = 1000;
#endif
-@@ -3017,8 +3019,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
+@@ -3017,8 +3024,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
void kmem_cache_destroy(struct kmem_cache *s)
{
down_write(&slub_lock);
@@ -73674,7 +73933,7 @@ index 7c54fe8..0bb4ac5 100644
list_del(&s->list);
if (kmem_cache_close(s)) {
printk(KERN_ERR "SLUB %s: %s called for cache that "
-@@ -3228,6 +3229,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
+@@ -3228,6 +3234,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
EXPORT_SYMBOL(__kmalloc_node);
#endif
@@ -73725,7 +73984,7 @@ index 7c54fe8..0bb4ac5 100644
size_t ksize(const void *object)
{
struct page *page;
-@@ -3502,7 +3547,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
+@@ -3502,7 +3552,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
int node;
list_add(&s->list, &slab_caches);
@@ -73734,7 +73993,7 @@ index 7c54fe8..0bb4ac5 100644
for_each_node_state(node, N_NORMAL_MEMORY) {
struct kmem_cache_node *n = get_node(s, node);
-@@ -3619,17 +3664,17 @@ void __init kmem_cache_init(void)
+@@ -3619,17 +3669,17 @@ void __init kmem_cache_init(void)
/* Caches that are not of the two-to-the-power-of size */
if (KMALLOC_MIN_SIZE <= 32) {
@@ -73755,7 +74014,7 @@ index 7c54fe8..0bb4ac5 100644
caches++;
}
-@@ -3697,7 +3742,7 @@ static int slab_unmergeable(struct kmem_cache *s)
+@@ -3697,7 +3747,7 @@ static int slab_unmergeable(struct kmem_cache *s)
/*
* We may have set a slab to be unmergeable during bootstrap.
*/
@@ -73764,7 +74023,7 @@ index 7c54fe8..0bb4ac5 100644
return 1;
return 0;
-@@ -3756,7 +3801,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+@@ -3756,7 +3806,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
down_write(&slub_lock);
s = find_mergeable(size, align, flags, name, ctor);
if (s) {
@@ -73773,7 +74032,7 @@ index 7c54fe8..0bb4ac5 100644
/*
* Adjust the object sizes so that we clear
* the complete object on kzalloc.
-@@ -3765,7 +3810,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+@@ -3765,7 +3815,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
if (sysfs_slab_alias(s, name)) {
@@ -73782,7 +74041,7 @@ index 7c54fe8..0bb4ac5 100644
goto err;
}
up_write(&slub_lock);
-@@ -3893,7 +3938,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
+@@ -3893,7 +3943,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
}
#endif
@@ -73791,7 +74050,7 @@ index 7c54fe8..0bb4ac5 100644
static int count_inuse(struct page *page)
{
return page->inuse;
-@@ -4280,12 +4325,12 @@ static void resiliency_test(void)
+@@ -4280,12 +4330,12 @@ static void resiliency_test(void)
validate_slab_cache(kmalloc_caches[9]);
}
#else
@@ -73806,7 +74065,7 @@ index 7c54fe8..0bb4ac5 100644
enum slab_stat_type {
SL_ALL, /* All slabs */
SL_PARTIAL, /* Only partially allocated slabs */
-@@ -4495,7 +4540,7 @@ SLAB_ATTR_RO(ctor);
+@@ -4495,7 +4545,7 @@ SLAB_ATTR_RO(ctor);
static ssize_t aliases_show(struct kmem_cache *s, char *buf)
{
@@ -73815,7 +74074,7 @@ index 7c54fe8..0bb4ac5 100644
}
SLAB_ATTR_RO(aliases);
-@@ -5025,6 +5070,7 @@ static char *create_unique_id(struct kmem_cache *s)
+@@ -5025,6 +5075,7 @@ static char *create_unique_id(struct kmem_cache *s)
return name;
}
@@ -73823,7 +74082,7 @@ index 7c54fe8..0bb4ac5 100644
static int sysfs_slab_add(struct kmem_cache *s)
{
int err;
-@@ -5087,6 +5133,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
+@@ -5087,6 +5138,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
kobject_del(&s->kobj);
kobject_put(&s->kobj);
}
@@ -73831,7 +74090,7 @@ index 7c54fe8..0bb4ac5 100644
/*
* Need to buffer aliases during bootup until sysfs becomes
-@@ -5100,6 +5147,7 @@ struct saved_alias {
+@@ -5100,6 +5152,7 @@ struct saved_alias {
static struct saved_alias *alias_list;
@@ -73839,7 +74098,7 @@ index 7c54fe8..0bb4ac5 100644
static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
struct saved_alias *al;
-@@ -5122,6 +5170,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
+@@ -5122,6 +5175,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
alias_list = al;
return 0;
}
@@ -73847,7 +74106,7 @@ index 7c54fe8..0bb4ac5 100644
static int __init slab_sysfs_init(void)
{
-@@ -5257,7 +5306,13 @@ static const struct file_operations proc_slabinfo_operations = {
+@@ -5257,7 +5311,13 @@ static const struct file_operations proc_slabinfo_operations = {
static int __init slab_proc_init(void)
{
@@ -74562,6 +74821,59 @@ index ea7f031..0615edc 100644
hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
}
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index b3bdb48..7ad90ac 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -2145,7 +2145,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
+ void *ptr = req->data;
+ int type, olen;
+ unsigned long val;
+- struct l2cap_conf_rfc rfc;
++ struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
+
+ BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
+
+@@ -2169,8 +2169,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
+ break;
+
+ case L2CAP_CONF_RFC:
+- if (olen == sizeof(rfc))
+- memcpy(&rfc, (void *)val, olen);
++ if (olen != sizeof(rfc))
++ break;
++
++ memcpy(&rfc, (void *)val, olen);
+
+ if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
+ rfc.mode != chan->mode)
+@@ -2258,12 +2260,24 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
+
+ switch (type) {
+ case L2CAP_CONF_RFC:
+- if (olen == sizeof(rfc))
+- memcpy(&rfc, (void *)val, olen);
++ if (olen != sizeof(rfc))
++ break;
++
++ memcpy(&rfc, (void *)val, olen);
+ goto done;
+ }
+ }
+
++ /* Use sane default values in case a misbehaving remote device
++ * did not send an RFC option.
++ */
++ rfc.mode = chan->mode;
++ rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
++ rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
++ rfc.max_pdu_size = cpu_to_le16(chan->imtu);
++
++ BT_ERR("Expected RFC option was not found, using defaults");
++
+ done:
+ switch (rfc.mode) {
+ case L2CAP_MODE_ERTM:
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index e79ff75..215b57d 100644
--- a/net/bridge/br_multicast.c
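
The l2cap_core.c hunk added above hardens the RFC configuration-option handling: an option is copied into the fixed-size struct only when its advertised length matches exactly, and sane defaults are used when a remote peer never sends a valid option. A small hedged sketch of that pattern outside the kernel follows; the struct layout, option code and default numbers are invented for illustration and are not the Bluetooth values.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct rfc_opt {			/* stand-in for l2cap_conf_rfc */
	uint8_t  mode;
	uint16_t retrans_timeout;
	uint16_t monitor_timeout;
	uint16_t max_pdu_size;
};

#define OPT_RFC 0x04			/* illustrative option type */

/* buf holds {type, len, value...} options back to back, total = len bytes */
static struct rfc_opt parse_rfc(const uint8_t *buf, size_t len, uint8_t cur_mode)
{
	struct rfc_opt rfc = {		/* defaults survive a missing/bad option */
		.mode = cur_mode,
		.retrans_timeout = 2000,
		.monitor_timeout = 12000,
		.max_pdu_size = 1009,
	};
	size_t off = 0;

	while (off + 2 <= len) {
		uint8_t type = buf[off];
		uint8_t olen = buf[off + 1];

		if (off + 2 + olen > len)		/* truncated option */
			break;
		if (type == OPT_RFC && olen == sizeof(rfc))
			memcpy(&rfc, buf + off + 2, olen);	/* exact size only */
		off += 2 + olen;
	}
	return rfc;
}

int main(void)
{
	const uint8_t bogus[] = { OPT_RFC, 3, 1, 2, 3 };	/* wrong length */
	struct rfc_opt r = parse_rfc(bogus, sizeof(bogus), 0);

	printf("mode=%u (default kept, malformed option ignored)\n", r.mode);
	return 0;
}
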
@@ -74972,7 +75284,7 @@ index ae5cf2d..2c950a1 100644
struct softnet_data *sd = &__get_cpu_var(softnet_data);
unsigned long time_limit = jiffies + 2;
diff --git a/net/core/flow.c b/net/core/flow.c
-index 555a456..de48421 100644
+index d6968e5..1690d9d 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -61,7 +61,7 @@ struct flow_cache {
@@ -75446,10 +75758,10 @@ index 8905e92..0b179fb 100644
msg.msg_flags = flags;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
-index 472a8c4..6507cd4 100644
+index 004bb74..8d4a58c 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
-@@ -313,7 +313,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
+@@ -317,7 +317,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
mm_segment_t oldfs = get_fs();
set_fs(get_ds());
@@ -75458,7 +75770,7 @@ index 472a8c4..6507cd4 100644
set_fs(oldfs);
return res;
}
-@@ -324,7 +324,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
+@@ -328,7 +328,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
mm_segment_t oldfs = get_fs();
set_fs(get_ds());
@@ -75467,7 +75779,7 @@ index 472a8c4..6507cd4 100644
set_fs(oldfs);
return res;
}
-@@ -335,7 +335,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
+@@ -339,7 +339,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
mm_segment_t oldfs = get_fs();
set_fs(get_ds());
@@ -75564,10 +75876,10 @@ index 61714bd..c9cee6d 100644
static int raw_seq_show(struct seq_file *seq, void *v)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
-index 05ac666c..82384a7 100644
+index b563854..e03f8a6 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
-@@ -309,7 +309,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
+@@ -313,7 +313,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
static inline int rt_genid(struct net *net)
{
@@ -75576,7 +75888,7 @@ index 05ac666c..82384a7 100644
}
#ifdef CONFIG_PROC_FS
-@@ -842,7 +842,7 @@ static void rt_cache_invalidate(struct net *net)
+@@ -937,7 +937,7 @@ static void rt_cache_invalidate(struct net *net)
unsigned char shuffle;
get_random_bytes(&shuffle, sizeof(shuffle));
@@ -75585,7 +75897,7 @@ index 05ac666c..82384a7 100644
redirect_genid++;
}
-@@ -2920,7 +2920,7 @@ static int rt_fill_info(struct net *net,
+@@ -3015,7 +3015,7 @@ static int rt_fill_info(struct net *net,
error = rt->dst.error;
if (peer) {
inet_peer_refcheck(rt->peer);
@@ -77626,19 +77938,6 @@ index 7635107..5000b71 100644
_proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
-diff --git a/net/sctp/associola.c b/net/sctp/associola.c
-index dc16b90..4981482 100644
---- a/net/sctp/associola.c
-+++ b/net/sctp/associola.c
-@@ -173,7 +173,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
- asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
- asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
- asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
-- (unsigned long)sp->autoclose * HZ;
-+ min_t(unsigned long, sp->autoclose, sctp_max_autoclose) * HZ;
-
- /* Initializes the timers */
- for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 865e68f..bf81204 100644
--- a/net/sctp/auth.c
@@ -77666,34 +77965,11 @@ index 05a6ce2..c8bf836 100644
assoc->state, hash,
assoc->assoc_id,
assoc->sndbuf_used,
-diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
-index 91784f4..48cb7b9 100644
---- a/net/sctp/protocol.c
-+++ b/net/sctp/protocol.c
-@@ -1285,6 +1285,9 @@ SCTP_STATIC __init int sctp_init(void)
- sctp_max_instreams = SCTP_DEFAULT_INSTREAMS;
- sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS;
-
-+ /* Initialize maximum autoclose timeout. */
-+ sctp_max_autoclose = INT_MAX / HZ;
-+
- /* Initialize handle used for association ids. */
- idr_init(&sctp_assocs_id);
-
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
-index 836aa63..e44d3fb 100644
+index 4760f4e..e44d3fb 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
-@@ -2199,8 +2199,6 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
- return -EINVAL;
- if (copy_from_user(&sp->autoclose, optval, optlen))
- return -EFAULT;
-- /* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */
-- sp->autoclose = min_t(long, sp->autoclose, MAX_SCHEDULE_TIMEOUT / HZ);
-
- return 0;
- }
-@@ -4575,7 +4573,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
+@@ -4573,7 +4573,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
if (space_left < addrlen)
return -ENOMEM;
@@ -77702,37 +77978,6 @@ index 836aa63..e44d3fb 100644
return -EFAULT;
to += addrlen;
cnt++;
-diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
-index 6b39529..60ffbd0 100644
---- a/net/sctp/sysctl.c
-+++ b/net/sctp/sysctl.c
-@@ -53,6 +53,10 @@ static int sack_timer_min = 1;
- static int sack_timer_max = 500;
- static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
- static int rwnd_scale_max = 16;
-+static unsigned long max_autoclose_min = 0;
-+static unsigned long max_autoclose_max =
-+ (MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX)
-+ ? UINT_MAX : MAX_SCHEDULE_TIMEOUT / HZ;
-
- extern long sysctl_sctp_mem[3];
- extern int sysctl_sctp_rmem[3];
-@@ -258,6 +262,15 @@ static ctl_table sctp_table[] = {
- .extra1 = &one,
- .extra2 = &rwnd_scale_max,
- },
-+ {
-+ .procname = "max_autoclose",
-+ .data = &sctp_max_autoclose,
-+ .maxlen = sizeof(unsigned long),
-+ .mode = 0644,
-+ .proc_handler = &proc_doulongvec_minmax,
-+ .extra1 = &max_autoclose_min,
-+ .extra2 = &max_autoclose_max,
-+ },
-
- { /* sentinel */ }
- };
diff --git a/net/socket.c b/net/socket.c
index ffe92ca..8057b85 100644
--- a/net/socket.c
@@ -78408,7 +78653,7 @@ index fdbc23c..212d53e 100644
iwp->length += essid_compat;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
-index 552df27..8e7f238 100644
+index 7e088c0..dd3f206 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
@@ -78465,7 +78710,7 @@ index 552df27..8e7f238 100644
return xdst;
}
-@@ -2335,7 +2335,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
+@@ -2345,7 +2345,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
if (xdst->xfrm_genid != dst->xfrm->genid)
return 0;
if (xdst->num_pols > 0 &&
@@ -78474,7 +78719,7 @@ index 552df27..8e7f238 100644
return 0;
mtu = dst_mtu(dst->child);
-@@ -2870,7 +2870,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
+@@ -2880,7 +2880,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
sizeof(pol->xfrm_vec[i].saddr));
pol->xfrm_vec[i].encap_family = mp->new_family;
/* flush bundles */
@@ -78821,10 +79066,10 @@ index 5c11312..72742b5 100644
write_hex_cnt = 0;
for (i = 0; i < logo_clutsize; i++) {
diff --git a/security/Kconfig b/security/Kconfig
-index e0f08b5..7388edd 100644
+index e0f08b5..649220f 100644
--- a/security/Kconfig
+++ b/security/Kconfig
-@@ -4,6 +4,586 @@
+@@ -4,6 +4,626 @@
menu "Security options"
@@ -78864,12 +79109,11 @@ index e0f08b5..7388edd 100644
+
+config PAX_SOFTMODE
+ bool 'Support soft mode'
-+ select PAX_PT_PAX_FLAGS
+ help
+ Enabling this option will allow you to run PaX in soft mode, that
+ is, PaX features will not be enforced by default, only on executables
-+ marked explicitly. You must also enable PT_PAX_FLAGS support as it
-+ is the only way to mark executables for soft mode use.
++ marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
++ support as they are the only way to mark executables for soft mode use.
+
+ Soft mode can be activated by using the "pax_softmode=1" kernel command
+ line option on boot. Furthermore you can control various PaX features
@@ -78884,10 +79128,15 @@ index e0f08b5..7388edd 100644
+ an otherwise reserved part of the ELF header. This marking has
+ numerous drawbacks (no support for soft-mode, toolchain does not
+ know about the non-standard use of the ELF header) therefore it
-+ has been deprecated in favour of PT_PAX_FLAGS support.
++ has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
++ support.
++
++ If you have applications not marked by the PT_PAX_FLAGS ELF program
++ header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
++ option otherwise they will not get any protection.
+
-+ Note that if you enable PT_PAX_FLAGS marking support as well,
-+ the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
++ Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
++ support as well, they will override the legacy EI_PAX marks.
+
+config PAX_PT_PAX_FLAGS
+ bool 'Use ELF program header marking'
@@ -78900,12 +79149,49 @@ index e0f08b5..7388edd 100644
+ integrated into the toolchain (the binutils patch is available
+ from http://pax.grsecurity.net).
+
-+ If your toolchain does not support PT_PAX_FLAGS markings,
-+ you can create one in most cases with 'paxctl -C'.
++ If you have applications not marked by the PT_PAX_FLAGS ELF program
++ header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
++ support otherwise they will not get any protection.
++
++ If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
++ must make sure that the marks are the same if a binary has both marks.
+
+ Note that if you enable the legacy EI_PAX marking support as well,
+ the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
+
++config PAX_XATTR_PAX_FLAGS
++ bool 'Use filesystem extended attributes marking'
++ depends on EXPERT
++ select CIFS_XATTR if CIFS
++ select EXT2_FS_XATTR if EXT2_FS
++ select EXT3_FS_XATTR if EXT3_FS
++ select EXT4_FS_XATTR if EXT4_FS
++ select JFFS2_FS_XATTR if JFFS2_FS
++ select REISERFS_FS_XATTR if REISERFS_FS
++ select SQUASHFS_XATTR if SQUASHFS
++ select TMPFS_XATTR if TMPFS
++ select UBIFS_FS_XATTR if UBIFS_FS
++ help
++ Enabling this option will allow you to control PaX features on
++ a per executable basis via the 'setfattr' utility. The control
++ flags will be read from the user.pax.flags extended attribute of
++ the file. This marking has the benefit of supporting binary-only
++ applications that self-check themselves (e.g., skype) and would
++ not tolerate chpax/paxctl changes. The main drawback is that
++ extended attributes are not supported by some filesystems (e.g.,
++ isofs, udf, vfat) so copying files through such filesystems will
++ lose the extended attributes and these PaX markings.
++
++ If you have applications not marked by the PT_PAX_FLAGS ELF program
++ header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
++ support otherwise they will not get any protection.
++
++ If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
++ must make sure that the marks are the same if a binary has both marks.
++
++ Note that if you enable the legacy EI_PAX marking support as well,
++ the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
++
+choice
+ prompt 'MAC system integration'
+ default PAX_HAVE_ACL_FLAGS
@@ -78937,7 +79223,7 @@ index e0f08b5..7388edd 100644
+
+config PAX_NOEXEC
+ bool "Enforce non-executable pages"
-+ depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
++ depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
+ help
+ By design some architectures do not allow for protecting memory
+ pages against execution or even if they do, Linux does not make
@@ -79212,7 +79498,6 @@ index e0f08b5..7388edd 100644
+
+config PAX_ASLR
+ bool "Address Space Layout Randomization"
-+ depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
+ help
+ Many if not most exploit techniques rely on the knowledge of
+ certain addresses in the attacked program. The following options
@@ -79332,10 +79617,10 @@ index e0f08b5..7388edd 100644
+ before deploying it.
+
+ Note: full support for this feature requires gcc with plugin support
-+ so make sure your compiler is at least gcc 4.5.0 (cross compilation
-+ is not supported). Using older gcc versions means that functions
-+ with large enough stack frames may leave uninitialized memory behind
-+ that may be exposed to a later syscall leaking the stack.
++ so make sure your compiler is at least gcc 4.5.0. Using older gcc
++ versions means that functions with large enough stack frames may
++ leave uninitialized memory behind that may be exposed to a later
++ syscall leaking the stack.
+
+config PAX_MEMORY_UDEREF
+ bool "Prevent invalid userland pointer dereference"
@@ -79411,7 +79696,7 @@ index e0f08b5..7388edd 100644
config KEYS
bool "Enable access key retention support"
help
-@@ -167,7 +747,7 @@ config INTEL_TXT
+@@ -167,7 +787,7 @@ config INTEL_TXT
config LSM_MMAP_MIN_ADDR
int "Low address space for LSM to protect from user allocation"
depends on SECURITY && SECURITY_SELINUX
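
Aside, as an illustration of the marking schemes described in the Kconfig help text above (this sketch is not part of the patchset): the new help explains that a binary can carry its PaX flags either in a PT_PAX_FLAGS ELF program header or in a user.pax.flags extended attribute read when PAX_XATTR_PAX_FLAGS is enabled. A minimal userland C sketch for inspecting both markings follows. Assumptions: the PT_PAX_FLAGS p_type value 0x65041580 is the conventional PaX definition (not taken from the text above), only 64-bit ELF files are handled, and the individual flag letters are printed verbatim rather than interpreted.

/*
 * pax-mark-check: report whether a binary carries a PT_PAX_FLAGS program
 * header and/or a user.pax.flags extended attribute.  Illustrative sketch
 * only; 64-bit ELF, minimal error handling.
 */
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/xattr.h>

#ifndef PT_PAX_FLAGS
#define PT_PAX_FLAGS 0x65041580	/* PaX convention; assumption, not from the patch above */
#endif

/* returns 1 if a PT_PAX_FLAGS program header is present, 0 if absent, -1 on error */
static int has_pt_pax_flags(const char *path)
{
	Elf64_Ehdr ehdr;
	Elf64_Phdr phdr;
	int fd, i, found = 0;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;

	if (read(fd, &ehdr, sizeof(ehdr)) != sizeof(ehdr) ||
	    memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
	    ehdr.e_ident[EI_CLASS] != ELFCLASS64) {
		close(fd);
		return -1;
	}

	/* walk the program headers looking for the PaX marking */
	for (i = 0; i < ehdr.e_phnum; i++) {
		if (pread(fd, &phdr, sizeof(phdr),
			  ehdr.e_phoff + (off_t)i * ehdr.e_phentsize) != sizeof(phdr))
			break;
		if (phdr.p_type == PT_PAX_FLAGS) {
			found = 1;
			break;
		}
	}
	close(fd);
	return found;
}

int main(int argc, char *argv[])
{
	char buf[64];
	ssize_t len;
	int ret;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <elf-file>\n", argv[0]);
		return 1;
	}

	ret = has_pt_pax_flags(argv[1]);
	if (ret < 0)
		printf("%s: not a readable 64-bit ELF file\n", argv[1]);
	else
		printf("PT_PAX_FLAGS program header: %s\n", ret ? "present" : "absent");

	/* the xattr marking read when CONFIG_PAX_XATTR_PAX_FLAGS is enabled */
	len = getxattr(argv[1], "user.pax.flags", buf, sizeof(buf) - 1);
	if (len >= 0) {
		buf[len] = '\0';
		printf("user.pax.flags xattr: \"%s\"\n", buf);
	} else {
		printf("user.pax.flags xattr: not set\n");
	}
	return 0;
}

In practice the program-header marking is managed with the paxctl tool referenced in the help text ('paxctl -C' creates the PT_PAX_FLAGS header), and the extended attribute with setfattr, e.g. 'setfattr -n user.pax.flags -v <flags> <binary>'; the concrete flag letters to pass depend on which PaX features are being toggled and are not covered by this sketch.
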
diff --git a/3.1.7/4421_grsec-remove-localversion-grsec.patch b/3.1.8/4421_grsec-remove-localversion-grsec.patch
index 31cf878..31cf878 100644
--- a/3.1.7/4421_grsec-remove-localversion-grsec.patch
+++ b/3.1.8/4421_grsec-remove-localversion-grsec.patch
diff --git a/3.1.7/4422_grsec-mute-warnings.patch b/3.1.8/4422_grsec-mute-warnings.patch
index e85abd6..e85abd6 100644
--- a/3.1.7/4422_grsec-mute-warnings.patch
+++ b/3.1.8/4422_grsec-mute-warnings.patch
diff --git a/3.1.7/4423_grsec-remove-protected-paths.patch b/3.1.8/4423_grsec-remove-protected-paths.patch
index 4afb3e2..4afb3e2 100644
--- a/3.1.7/4423_grsec-remove-protected-paths.patch
+++ b/3.1.8/4423_grsec-remove-protected-paths.patch
diff --git a/3.1.7/4425_grsec-pax-without-grsec.patch b/3.1.8/4425_grsec-pax-without-grsec.patch
index 97e8837..3511545 100644
--- a/3.1.7/4425_grsec-pax-without-grsec.patch
+++ b/3.1.8/4425_grsec-pax-without-grsec.patch
@@ -36,7 +36,7 @@ diff -Naur a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
diff -Naur a/fs/exec.c b/fs/exec.c
--- a/fs/exec.c 2011-04-17 19:05:03.000000000 -0400
+++ b/fs/exec.c 2011-04-17 19:20:30.000000000 -0400
-@@ -1999,9 +1999,11 @@
+@@ -2003,9 +2003,11 @@
}
up_read(&mm->mmap_sem);
}
@@ -48,7 +48,7 @@ diff -Naur a/fs/exec.c b/fs/exec.c
printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
"PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
-@@ -2016,10 +2018,12 @@
+@@ -2020,10 +2022,12 @@
#ifdef CONFIG_PAX_REFCOUNT
void pax_report_refcount_overflow(struct pt_regs *regs)
{
@@ -61,7 +61,7 @@ diff -Naur a/fs/exec.c b/fs/exec.c
printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
current->comm, task_pid_nr(current), current_uid(), current_euid());
 print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
-@@ -2078,10 +2082,12 @@
+@@ -2082,10 +2086,12 @@
NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
{
diff --git a/3.1.7/4430_grsec-kconfig-default-gids.patch b/3.1.8/4430_grsec-kconfig-default-gids.patch
index 243fbd5..243fbd5 100644
--- a/3.1.7/4430_grsec-kconfig-default-gids.patch
+++ b/3.1.8/4430_grsec-kconfig-default-gids.patch
diff --git a/3.1.7/4435_grsec-kconfig-gentoo.patch b/3.1.8/4435_grsec-kconfig-gentoo.patch
index ed58187..9ff2fe7 100644
--- a/3.1.7/4435_grsec-kconfig-gentoo.patch
+++ b/3.1.8/4435_grsec-kconfig-gentoo.patch
@@ -290,7 +290,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
diff -Naur a/security/Kconfig b/security/Kconfig
--- a/security/Kconfig 2011-12-26 12:23:44.000000000 -0500
+++ b/security/Kconfig 2011-12-26 11:14:27.000000000 -0500
-@@ -322,9 +322,10 @@
+@@ -363,9 +363,10 @@
config PAX_KERNEXEC
bool "Enforce non-executable kernel pages"
@@ -302,7 +302,7 @@ diff -Naur a/security/Kconfig b/security/Kconfig
help
This is the kernel land equivalent of PAGEEXEC and MPROTECT,
that is, enabling this option will make it harder to inject
-@@ -335,30 +336,30 @@
+@@ -376,30 +377,30 @@
choice
prompt "Return Address Instrumentation Method"
@@ -341,7 +341,7 @@ diff -Naur a/security/Kconfig b/security/Kconfig
default ""
config PAX_KERNEXEC_MODULE_TEXT
-@@ -515,8 +516,9 @@
+@@ -555,8 +556,9 @@
config PAX_MEMORY_UDEREF
bool "Prevent invalid userland pointer dereference"
diff --git a/3.1.7/4437-grsec-kconfig-proc-user.patch b/3.1.8/4437-grsec-kconfig-proc-user.patch
index 54b2678..54b2678 100644
--- a/3.1.7/4437-grsec-kconfig-proc-user.patch
+++ b/3.1.8/4437-grsec-kconfig-proc-user.patch
diff --git a/3.1.7/4440_selinux-avc_audit-log-curr_ip.patch b/3.1.8/4440_selinux-avc_audit-log-curr_ip.patch
index 9c38cfc..9c38cfc 100644
--- a/3.1.7/4440_selinux-avc_audit-log-curr_ip.patch
+++ b/3.1.8/4440_selinux-avc_audit-log-curr_ip.patch
diff --git a/3.1.7/4445_disable-compat_vdso.patch b/3.1.8/4445_disable-compat_vdso.patch
index 737dcca..737dcca 100644
--- a/3.1.7/4445_disable-compat_vdso.patch
+++ b/3.1.8/4445_disable-compat_vdso.patch