author     Anthony G. Basile <blueness@gentoo.org>  2013-06-08 18:26:43 -0400
committer  Anthony G. Basile <blueness@gentoo.org>  2013-06-08 18:26:43 -0400
commit     8d3f7ba96a49376ab175bf7ee59b0bfa42b4f434 (patch)
tree       c41270770f228fc60b2a16ae739cfcf5e99d99ab
parent     Grsec/PaX: 2.9.1-{2.6.32.60,3.2.45,3.9.4}-201306041949 (diff)
Fix 3.2.45 -> 3.2.46 (tag: 20130604)
-rw-r--r--  3.2.46/0000_README (renamed from 3.2.45/0000_README)  4
-rw-r--r--  3.2.46/1021_linux-3.2.22.patch (renamed from 3.2.45/1021_linux-3.2.22.patch)  0
-rw-r--r--  3.2.46/1022_linux-3.2.23.patch (renamed from 3.2.45/1022_linux-3.2.23.patch)  0
-rw-r--r--  3.2.46/1023_linux-3.2.24.patch (renamed from 3.2.45/1023_linux-3.2.24.patch)  0
-rw-r--r--  3.2.46/1024_linux-3.2.25.patch (renamed from 3.2.45/1024_linux-3.2.25.patch)  0
-rw-r--r--  3.2.46/1025_linux-3.2.26.patch (renamed from 3.2.45/1025_linux-3.2.26.patch)  0
-rw-r--r--  3.2.46/1026_linux-3.2.27.patch (renamed from 3.2.45/1026_linux-3.2.27.patch)  0
-rw-r--r--  3.2.46/1027_linux-3.2.28.patch (renamed from 3.2.45/1027_linux-3.2.28.patch)  0
-rw-r--r--  3.2.46/1028_linux-3.2.29.patch (renamed from 3.2.45/1028_linux-3.2.29.patch)  0
-rw-r--r--  3.2.46/1029_linux-3.2.30.patch (renamed from 3.2.45/1029_linux-3.2.30.patch)  0
-rw-r--r--  3.2.46/1030_linux-3.2.31.patch (renamed from 3.2.45/1030_linux-3.2.31.patch)  0
-rw-r--r--  3.2.46/1031_linux-3.2.32.patch (renamed from 3.2.45/1031_linux-3.2.32.patch)  0
-rw-r--r--  3.2.46/1032_linux-3.2.33.patch (renamed from 3.2.45/1032_linux-3.2.33.patch)  0
-rw-r--r--  3.2.46/1033_linux-3.2.34.patch (renamed from 3.2.45/1033_linux-3.2.34.patch)  0
-rw-r--r--  3.2.46/1034_linux-3.2.35.patch (renamed from 3.2.45/1034_linux-3.2.35.patch)  0
-rw-r--r--  3.2.46/1035_linux-3.2.36.patch (renamed from 3.2.45/1035_linux-3.2.36.patch)  0
-rw-r--r--  3.2.46/1036_linux-3.2.37.patch (renamed from 3.2.45/1036_linux-3.2.37.patch)  0
-rw-r--r--  3.2.46/1037_linux-3.2.38.patch (renamed from 3.2.45/1037_linux-3.2.38.patch)  0
-rw-r--r--  3.2.46/1038_linux-3.2.39.patch (renamed from 3.2.45/1038_linux-3.2.39.patch)  0
-rw-r--r--  3.2.46/1039_linux-3.2.40.patch (renamed from 3.2.45/1039_linux-3.2.40.patch)  0
-rw-r--r--  3.2.46/1040_linux-3.2.41.patch (renamed from 3.2.45/1040_linux-3.2.41.patch)  0
-rw-r--r--  3.2.46/1041_linux-3.2.42.patch (renamed from 3.2.45/1041_linux-3.2.42.patch)  0
-rw-r--r--  3.2.46/1042_linux-3.2.43.patch (renamed from 3.2.45/1042_linux-3.2.43.patch)  0
-rw-r--r--  3.2.46/1043_linux-3.2.44.patch (renamed from 3.2.45/1043_linux-3.2.44.patch)  0
-rw-r--r--  3.2.46/1044_linux-3.2.45.patch (renamed from 3.2.45/1044_linux-3.2.45.patch)  0
-rw-r--r--  3.2.46/1045_linux-3.2.46.patch  3142
-rw-r--r--  3.2.46/4420_grsecurity-2.9.1-3.2.46-201306041947.patch (renamed from 3.2.45/4420_grsecurity-2.9.1-3.2.46-201306041947.patch)  0
-rw-r--r--  3.2.46/4425_grsec_remove_EI_PAX.patch (renamed from 3.2.45/4425_grsec_remove_EI_PAX.patch)  0
-rw-r--r--  3.2.46/4430_grsec-remove-localversion-grsec.patch (renamed from 3.2.45/4430_grsec-remove-localversion-grsec.patch)  0
-rw-r--r--  3.2.46/4435_grsec-mute-warnings.patch (renamed from 3.2.45/4435_grsec-mute-warnings.patch)  0
-rw-r--r--  3.2.46/4440_grsec-remove-protected-paths.patch (renamed from 3.2.45/4440_grsec-remove-protected-paths.patch)  0
-rw-r--r--  3.2.46/4450_grsec-kconfig-default-gids.patch (renamed from 3.2.45/4450_grsec-kconfig-default-gids.patch)  0
-rw-r--r--  3.2.46/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 3.2.45/4465_selinux-avc_audit-log-curr_ip.patch)  0
-rw-r--r--  3.2.46/4470_disable-compat_vdso.patch (renamed from 3.2.45/4470_disable-compat_vdso.patch)  0
-rw-r--r--  3.2.46/4475_emutramp_default_on.patch (renamed from 3.2.45/4475_emutramp_default_on.patch)  0
35 files changed, 3146 insertions, 0 deletions
diff --git a/3.2.45/0000_README b/3.2.46/0000_README
index 4a59301..7c63717 100644
--- a/3.2.45/0000_README
+++ b/3.2.46/0000_README
@@ -98,6 +98,10 @@ Patch: 1044_linux-3.2.45.patch
From: http://www.kernel.org
Desc: Linux 3.2.45
+Patch: 1045_linux-3.2.46.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.46
+
Patch: 4420_grsecurity-2.9.1-3.2.46-201306041947.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.2.45/1021_linux-3.2.22.patch b/3.2.46/1021_linux-3.2.22.patch
index e6ad93a..e6ad93a 100644
--- a/3.2.45/1021_linux-3.2.22.patch
+++ b/3.2.46/1021_linux-3.2.22.patch
diff --git a/3.2.45/1022_linux-3.2.23.patch b/3.2.46/1022_linux-3.2.23.patch
index 3d796d0..3d796d0 100644
--- a/3.2.45/1022_linux-3.2.23.patch
+++ b/3.2.46/1022_linux-3.2.23.patch
diff --git a/3.2.45/1023_linux-3.2.24.patch b/3.2.46/1023_linux-3.2.24.patch
index 4692eb4..4692eb4 100644
--- a/3.2.45/1023_linux-3.2.24.patch
+++ b/3.2.46/1023_linux-3.2.24.patch
diff --git a/3.2.45/1024_linux-3.2.25.patch b/3.2.46/1024_linux-3.2.25.patch
index e95c213..e95c213 100644
--- a/3.2.45/1024_linux-3.2.25.patch
+++ b/3.2.46/1024_linux-3.2.25.patch
diff --git a/3.2.45/1025_linux-3.2.26.patch b/3.2.46/1025_linux-3.2.26.patch
index 44065b9..44065b9 100644
--- a/3.2.45/1025_linux-3.2.26.patch
+++ b/3.2.46/1025_linux-3.2.26.patch
diff --git a/3.2.45/1026_linux-3.2.27.patch b/3.2.46/1026_linux-3.2.27.patch
index 5878eb4..5878eb4 100644
--- a/3.2.45/1026_linux-3.2.27.patch
+++ b/3.2.46/1026_linux-3.2.27.patch
diff --git a/3.2.45/1027_linux-3.2.28.patch b/3.2.46/1027_linux-3.2.28.patch
index 4dbba4b..4dbba4b 100644
--- a/3.2.45/1027_linux-3.2.28.patch
+++ b/3.2.46/1027_linux-3.2.28.patch
diff --git a/3.2.45/1028_linux-3.2.29.patch b/3.2.46/1028_linux-3.2.29.patch
index 3c65179..3c65179 100644
--- a/3.2.45/1028_linux-3.2.29.patch
+++ b/3.2.46/1028_linux-3.2.29.patch
diff --git a/3.2.45/1029_linux-3.2.30.patch b/3.2.46/1029_linux-3.2.30.patch
index 86aea4b..86aea4b 100644
--- a/3.2.45/1029_linux-3.2.30.patch
+++ b/3.2.46/1029_linux-3.2.30.patch
diff --git a/3.2.45/1030_linux-3.2.31.patch b/3.2.46/1030_linux-3.2.31.patch
index c6accf5..c6accf5 100644
--- a/3.2.45/1030_linux-3.2.31.patch
+++ b/3.2.46/1030_linux-3.2.31.patch
diff --git a/3.2.45/1031_linux-3.2.32.patch b/3.2.46/1031_linux-3.2.32.patch
index 247fc0b..247fc0b 100644
--- a/3.2.45/1031_linux-3.2.32.patch
+++ b/3.2.46/1031_linux-3.2.32.patch
diff --git a/3.2.45/1032_linux-3.2.33.patch b/3.2.46/1032_linux-3.2.33.patch
index c32fb75..c32fb75 100644
--- a/3.2.45/1032_linux-3.2.33.patch
+++ b/3.2.46/1032_linux-3.2.33.patch
diff --git a/3.2.45/1033_linux-3.2.34.patch b/3.2.46/1033_linux-3.2.34.patch
index d647b38..d647b38 100644
--- a/3.2.45/1033_linux-3.2.34.patch
+++ b/3.2.46/1033_linux-3.2.34.patch
diff --git a/3.2.45/1034_linux-3.2.35.patch b/3.2.46/1034_linux-3.2.35.patch
index 76a9c19..76a9c19 100644
--- a/3.2.45/1034_linux-3.2.35.patch
+++ b/3.2.46/1034_linux-3.2.35.patch
diff --git a/3.2.45/1035_linux-3.2.36.patch b/3.2.46/1035_linux-3.2.36.patch
index 5d192a3..5d192a3 100644
--- a/3.2.45/1035_linux-3.2.36.patch
+++ b/3.2.46/1035_linux-3.2.36.patch
diff --git a/3.2.45/1036_linux-3.2.37.patch b/3.2.46/1036_linux-3.2.37.patch
index ad13251..ad13251 100644
--- a/3.2.45/1036_linux-3.2.37.patch
+++ b/3.2.46/1036_linux-3.2.37.patch
diff --git a/3.2.45/1037_linux-3.2.38.patch b/3.2.46/1037_linux-3.2.38.patch
index a3c106f..a3c106f 100644
--- a/3.2.45/1037_linux-3.2.38.patch
+++ b/3.2.46/1037_linux-3.2.38.patch
diff --git a/3.2.45/1038_linux-3.2.39.patch b/3.2.46/1038_linux-3.2.39.patch
index 5639e92..5639e92 100644
--- a/3.2.45/1038_linux-3.2.39.patch
+++ b/3.2.46/1038_linux-3.2.39.patch
diff --git a/3.2.45/1039_linux-3.2.40.patch b/3.2.46/1039_linux-3.2.40.patch
index f26b39c..f26b39c 100644
--- a/3.2.45/1039_linux-3.2.40.patch
+++ b/3.2.46/1039_linux-3.2.40.patch
diff --git a/3.2.45/1040_linux-3.2.41.patch b/3.2.46/1040_linux-3.2.41.patch
index 0d27fcb..0d27fcb 100644
--- a/3.2.45/1040_linux-3.2.41.patch
+++ b/3.2.46/1040_linux-3.2.41.patch
diff --git a/3.2.45/1041_linux-3.2.42.patch b/3.2.46/1041_linux-3.2.42.patch
index 77a08ed..77a08ed 100644
--- a/3.2.45/1041_linux-3.2.42.patch
+++ b/3.2.46/1041_linux-3.2.42.patch
diff --git a/3.2.45/1042_linux-3.2.43.patch b/3.2.46/1042_linux-3.2.43.patch
index a3f878b..a3f878b 100644
--- a/3.2.45/1042_linux-3.2.43.patch
+++ b/3.2.46/1042_linux-3.2.43.patch
diff --git a/3.2.45/1043_linux-3.2.44.patch b/3.2.46/1043_linux-3.2.44.patch
index 3d5e6ff..3d5e6ff 100644
--- a/3.2.45/1043_linux-3.2.44.patch
+++ b/3.2.46/1043_linux-3.2.44.patch
diff --git a/3.2.45/1044_linux-3.2.45.patch b/3.2.46/1044_linux-3.2.45.patch
index 44e1767..44e1767 100644
--- a/3.2.45/1044_linux-3.2.45.patch
+++ b/3.2.46/1044_linux-3.2.45.patch
diff --git a/3.2.46/1045_linux-3.2.46.patch b/3.2.46/1045_linux-3.2.46.patch
new file mode 100644
index 0000000..bc10efd
--- /dev/null
+++ b/3.2.46/1045_linux-3.2.46.patch
@@ -0,0 +1,3142 @@
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 897f223..2ba8272 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -734,6 +734,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ edd= [EDD]
+ Format: {"off" | "on" | "skip[mbr]"}
+
++ efi_no_storage_paranoia [EFI; X86]
++ Using this parameter you can use more than 50% of
++ your efi variable storage. Use this parameter only if
++ you are really sure that your UEFI does sane gc and
++ fulfills the spec otherwise your board may brick.
++
+ eisa_irq_edge= [PARISC,HW]
+ See header of drivers/parisc/eisa.c.
+
+diff --git a/Makefile b/Makefile
+index 9072fee..f600582 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 45
++SUBLEVEL = 46
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/configs/at91sam9g45_defconfig b/arch/arm/configs/at91sam9g45_defconfig
+index 606d48f..8aab786 100644
+--- a/arch/arm/configs/at91sam9g45_defconfig
++++ b/arch/arm/configs/at91sam9g45_defconfig
+@@ -173,7 +173,6 @@ CONFIG_MMC=y
+ # CONFIG_MMC_BLOCK_BOUNCE is not set
+ CONFIG_SDIO_UART=m
+ CONFIG_MMC_ATMELMCI=y
+-CONFIG_MMC_ATMELMCI_DMA=y
+ CONFIG_LEDS_ATMEL_PWM=y
+ CONFIG_LEDS_GPIO=y
+ CONFIG_LEDS_TRIGGER_TIMER=y
+diff --git a/arch/arm/mach-kirkwood/ts219-setup.c b/arch/arm/mach-kirkwood/ts219-setup.c
+index 262c034..3d0737e 100644
+--- a/arch/arm/mach-kirkwood/ts219-setup.c
++++ b/arch/arm/mach-kirkwood/ts219-setup.c
+@@ -124,7 +124,7 @@ static void __init qnap_ts219_init(void)
+ static int __init ts219_pci_init(void)
+ {
+ if (machine_is_ts219())
+- kirkwood_pcie_init(KW_PCIE0);
++ kirkwood_pcie_init(KW_PCIE1 | KW_PCIE0);
+
+ return 0;
+ }
+diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
+index 8a6886a..c72b083 100644
+--- a/arch/arm/plat-orion/common.c
++++ b/arch/arm/plat-orion/common.c
+@@ -347,7 +347,7 @@ static struct resource orion_ge10_shared_resources[] = {
+
+ static struct platform_device orion_ge10_shared = {
+ .name = MV643XX_ETH_SHARED_NAME,
+- .id = 1,
++ .id = 2,
+ .dev = {
+ .platform_data = &orion_ge10_shared_data,
+ },
+@@ -362,8 +362,8 @@ static struct resource orion_ge10_resources[] = {
+
+ static struct platform_device orion_ge10 = {
+ .name = MV643XX_ETH_NAME,
+- .id = 1,
+- .num_resources = 2,
++ .id = 2,
++ .num_resources = 1,
+ .resource = orion_ge10_resources,
+ .dev = {
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+@@ -401,7 +401,7 @@ static struct resource orion_ge11_shared_resources[] = {
+
+ static struct platform_device orion_ge11_shared = {
+ .name = MV643XX_ETH_SHARED_NAME,
+- .id = 1,
++ .id = 3,
+ .dev = {
+ .platform_data = &orion_ge11_shared_data,
+ },
+@@ -416,8 +416,8 @@ static struct resource orion_ge11_resources[] = {
+
+ static struct platform_device orion_ge11 = {
+ .name = MV643XX_ETH_NAME,
+- .id = 1,
+- .num_resources = 2,
++ .id = 3,
++ .num_resources = 1,
+ .resource = orion_ge11_resources,
+ .dev = {
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+diff --git a/arch/avr32/configs/favr-32_defconfig b/arch/avr32/configs/favr-32_defconfig
+index 19973b0..59e4cc9 100644
+--- a/arch/avr32/configs/favr-32_defconfig
++++ b/arch/avr32/configs/favr-32_defconfig
+@@ -122,7 +122,6 @@ CONFIG_USB_G_SERIAL=m
+ CONFIG_USB_CDC_COMPOSITE=m
+ CONFIG_MMC=y
+ CONFIG_MMC_ATMELMCI=y
+-CONFIG_MMC_ATMELMCI_DMA=y
+ CONFIG_NEW_LEDS=y
+ CONFIG_LEDS_CLASS=y
+ CONFIG_LEDS_ATMEL_PWM=m
+diff --git a/arch/avr32/configs/merisc_defconfig b/arch/avr32/configs/merisc_defconfig
+index 3befab9..65de443 100644
+--- a/arch/avr32/configs/merisc_defconfig
++++ b/arch/avr32/configs/merisc_defconfig
+@@ -102,7 +102,6 @@ CONFIG_FRAMEBUFFER_CONSOLE=y
+ CONFIG_LOGO=y
+ CONFIG_MMC=y
+ CONFIG_MMC_ATMELMCI=y
+-CONFIG_MMC_ATMELMCI_DMA=y
+ CONFIG_NEW_LEDS=y
+ CONFIG_LEDS_CLASS=y
+ CONFIG_LEDS_ATMEL_PWM=y
+diff --git a/arch/avr32/kernel/module.c b/arch/avr32/kernel/module.c
+index 596f730..2c94129 100644
+--- a/arch/avr32/kernel/module.c
++++ b/arch/avr32/kernel/module.c
+@@ -264,7 +264,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
+ break;
+ case R_AVR32_GOT18SW:
+ if ((relocation & 0xfffe0003) != 0
+- && (relocation & 0xfffc0003) != 0xffff0000)
++ && (relocation & 0xfffc0000) != 0xfffc0000)
+ return reloc_overflow(module, "R_AVR32_GOT18SW",
+ relocation);
+ relocation >>= 2;
+diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
+index 41f69ae..8c3efd2 100644
+--- a/arch/powerpc/include/asm/rtas.h
++++ b/arch/powerpc/include/asm/rtas.h
+@@ -230,6 +230,8 @@ extern void rtas_progress(char *s, unsigned short hex);
+ extern void rtas_initialize(void);
+ extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data);
+ extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data);
++extern int rtas_online_cpus_mask(cpumask_var_t cpus);
++extern int rtas_offline_cpus_mask(cpumask_var_t cpus);
+ extern int rtas_ibm_suspend_me(struct rtas_args *);
+
+ struct rtc_time;
+diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
+index 517b1d8..434a180 100644
+--- a/arch/powerpc/kernel/rtas.c
++++ b/arch/powerpc/kernel/rtas.c
+@@ -19,6 +19,7 @@
+ #include <linux/init.h>
+ #include <linux/capability.h>
+ #include <linux/delay.h>
++#include <linux/cpu.h>
+ #include <linux/smp.h>
+ #include <linux/completion.h>
+ #include <linux/cpumask.h>
+@@ -716,7 +717,6 @@ static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_w
+ int cpu;
+
+ slb_set_size(SLB_MIN_SIZE);
+- stop_topology_update();
+ printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id());
+
+ while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) &&
+@@ -732,7 +732,6 @@ static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_w
+ rc = atomic_read(&data->error);
+
+ atomic_set(&data->error, rc);
+- start_topology_update();
+ pSeries_coalesce_init();
+
+ if (wake_when_done) {
+@@ -811,6 +810,95 @@ static void rtas_percpu_suspend_me(void *info)
+ __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1);
+ }
+
++enum rtas_cpu_state {
++ DOWN,
++ UP,
++};
++
++#ifndef CONFIG_SMP
++static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
++ cpumask_var_t cpus)
++{
++ if (!cpumask_empty(cpus)) {
++ cpumask_clear(cpus);
++ return -EINVAL;
++ } else
++ return 0;
++}
++#else
++/* On return cpumask will be altered to indicate CPUs changed.
++ * CPUs with states changed will be set in the mask,
++ * CPUs with status unchanged will be unset in the mask. */
++static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
++ cpumask_var_t cpus)
++{
++ int cpu;
++ int cpuret = 0;
++ int ret = 0;
++
++ if (cpumask_empty(cpus))
++ return 0;
++
++ for_each_cpu(cpu, cpus) {
++ switch (state) {
++ case DOWN:
++ cpuret = cpu_down(cpu);
++ break;
++ case UP:
++ cpuret = cpu_up(cpu);
++ break;
++ }
++ if (cpuret) {
++ pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
++ __func__,
++ ((state == UP) ? "up" : "down"),
++ cpu, cpuret);
++ if (!ret)
++ ret = cpuret;
++ if (state == UP) {
++ /* clear bits for unchanged cpus, return */
++ cpumask_shift_right(cpus, cpus, cpu);
++ cpumask_shift_left(cpus, cpus, cpu);
++ break;
++ } else {
++ /* clear bit for unchanged cpu, continue */
++ cpumask_clear_cpu(cpu, cpus);
++ }
++ }
++ }
++
++ return ret;
++}
++#endif
++
++int rtas_online_cpus_mask(cpumask_var_t cpus)
++{
++ int ret;
++
++ ret = rtas_cpu_state_change_mask(UP, cpus);
++
++ if (ret) {
++ cpumask_var_t tmp_mask;
++
++ if (!alloc_cpumask_var(&tmp_mask, GFP_TEMPORARY))
++ return ret;
++
++ /* Use tmp_mask to preserve cpus mask from first failure */
++ cpumask_copy(tmp_mask, cpus);
++ rtas_offline_cpus_mask(tmp_mask);
++ free_cpumask_var(tmp_mask);
++ }
++
++ return ret;
++}
++EXPORT_SYMBOL(rtas_online_cpus_mask);
++
++int rtas_offline_cpus_mask(cpumask_var_t cpus)
++{
++ return rtas_cpu_state_change_mask(DOWN, cpus);
++}
++EXPORT_SYMBOL(rtas_offline_cpus_mask);
++
+ int rtas_ibm_suspend_me(struct rtas_args *args)
+ {
+ long state;
+@@ -818,6 +906,8 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ struct rtas_suspend_me_data data;
+ DECLARE_COMPLETION_ONSTACK(done);
++ cpumask_var_t offline_mask;
++ int cpuret;
+
+ if (!rtas_service_present("ibm,suspend-me"))
+ return -ENOSYS;
+@@ -841,12 +931,26 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
+ return 0;
+ }
+
++ if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
++ return -ENOMEM;
++
+ atomic_set(&data.working, 0);
+ atomic_set(&data.done, 0);
+ atomic_set(&data.error, 0);
+ data.token = rtas_token("ibm,suspend-me");
+ data.complete = &done;
+
++ /* All present CPUs must be online */
++ cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
++ cpuret = rtas_online_cpus_mask(offline_mask);
++ if (cpuret) {
++ pr_err("%s: Could not bring present CPUs online.\n", __func__);
++ atomic_set(&data.error, cpuret);
++ goto out;
++ }
++
++ stop_topology_update();
++
+ /* Call function on all CPUs. One of us will make the
+ * rtas call
+ */
+@@ -858,6 +962,16 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
+ if (atomic_read(&data.error) != 0)
+ printk(KERN_ERR "Error doing global join\n");
+
++ start_topology_update();
++
++ /* Take down CPUs not online prior to suspend */
++ cpuret = rtas_offline_cpus_mask(offline_mask);
++ if (cpuret)
++ pr_warn("%s: Could not restore CPUs to offline state.\n",
++ __func__);
++
++out:
++ free_cpumask_var(offline_mask);
+ return atomic_read(&data.error);
+ }
+ #else /* CONFIG_PPC_PSERIES */
+diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
+index d3de084..55a4771 100644
+--- a/arch/powerpc/platforms/pseries/suspend.c
++++ b/arch/powerpc/platforms/pseries/suspend.c
+@@ -16,6 +16,7 @@
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
++#include <linux/cpu.h>
+ #include <linux/delay.h>
+ #include <linux/suspend.h>
+ #include <linux/stat.h>
+@@ -24,6 +25,7 @@
+ #include <asm/machdep.h>
+ #include <asm/mmu.h>
+ #include <asm/rtas.h>
++#include <asm/topology.h>
+
+ static u64 stream_id;
+ static struct sys_device suspend_sysdev;
+@@ -125,11 +127,15 @@ static ssize_t store_hibernate(struct sysdev_class *classdev,
+ struct sysdev_class_attribute *attr,
+ const char *buf, size_t count)
+ {
++ cpumask_var_t offline_mask;
+ int rc;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
++ if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
++ return -ENOMEM;
++
+ stream_id = simple_strtoul(buf, NULL, 16);
+
+ do {
+@@ -138,13 +144,33 @@ static ssize_t store_hibernate(struct sysdev_class *classdev,
+ ssleep(1);
+ } while (rc == -EAGAIN);
+
+- if (!rc)
++ if (!rc) {
++ /* All present CPUs must be online */
++ cpumask_andnot(offline_mask, cpu_present_mask,
++ cpu_online_mask);
++ rc = rtas_online_cpus_mask(offline_mask);
++ if (rc) {
++ pr_err("%s: Could not bring present CPUs online.\n",
++ __func__);
++ goto out;
++ }
++
++ stop_topology_update();
+ rc = pm_suspend(PM_SUSPEND_MEM);
++ start_topology_update();
++
++ /* Take down CPUs not online prior to suspend */
++ if (!rtas_offline_cpus_mask(offline_mask))
++ pr_warn("%s: Could not restore CPUs to offline "
++ "state.\n", __func__);
++ }
+
+ stream_id = 0;
+
+ if (!rc)
+ rc = count;
++out:
++ free_cpumask_var(offline_mask);
+ return rc;
+ }
+
+diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
+index aa365c5..5888f1b 100644
+--- a/arch/um/include/asm/pgtable.h
++++ b/arch/um/include/asm/pgtable.h
+@@ -69,6 +69,8 @@ extern unsigned long end_iomem;
+ #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
+ #define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
+
++#define io_remap_pfn_range remap_pfn_range
++
+ /*
+ * The i386 can't do page protection for execute, and considers that the same
+ * are read.
+diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
+index 429e0c9..fb2eb32 100644
+--- a/arch/x86/kernel/irq.c
++++ b/arch/x86/kernel/irq.c
+@@ -160,10 +160,6 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
+ u64 arch_irq_stat(void)
+ {
+ u64 sum = atomic_read(&irq_err_count);
+-
+-#ifdef CONFIG_X86_IO_APIC
+- sum += atomic_read(&irq_mis_count);
+-#endif
+ return sum;
+ }
+
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 407789b..aac5ea7 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -4882,6 +4882,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
+ if (err != EMULATE_DONE)
+ return 0;
+
++ if (vcpu->arch.halt_request) {
++ vcpu->arch.halt_request = 0;
++ ret = kvm_emulate_halt(vcpu);
++ goto out;
++ }
++
+ if (signal_pending(current))
+ goto out;
+ if (need_resched())
+diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
+index 1de542b..07ef7e8 100644
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -101,6 +101,15 @@ static int __init setup_add_efi_memmap(char *arg)
+ }
+ early_param("add_efi_memmap", setup_add_efi_memmap);
+
++static bool efi_no_storage_paranoia;
++
++static int __init setup_storage_paranoia(char *arg)
++{
++ efi_no_storage_paranoia = true;
++ return 0;
++}
++early_param("efi_no_storage_paranoia", setup_storage_paranoia);
++
+
+ static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
+ {
+@@ -815,3 +824,37 @@ u64 efi_mem_attributes(unsigned long phys_addr)
+ }
+ return 0;
+ }
++
++/*
++ * Some firmware has serious problems when using more than 50% of the EFI
++ * variable store, i.e. it triggers bugs that can brick machines. Ensure that
++ * we never use more than this safe limit.
++ *
++ * Return EFI_SUCCESS if it is safe to write 'size' bytes to the variable
++ * store.
++ */
++efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
++{
++ efi_status_t status;
++ u64 storage_size, remaining_size, max_size;
++
++ status = efi.query_variable_info(attributes, &storage_size,
++ &remaining_size, &max_size);
++ if (status != EFI_SUCCESS)
++ return status;
++
++ if (!max_size && remaining_size > size)
++ printk_once(KERN_ERR FW_BUG "Broken EFI implementation"
++ " is returning MaxVariableSize=0\n");
++
++ if (!storage_size || size > remaining_size ||
++ (max_size && size > max_size))
++ return EFI_OUT_OF_RESOURCES;
++
++ if (!efi_no_storage_paranoia &&
++ (remaining_size - size) < (storage_size / 2))
++ return EFI_OUT_OF_RESOURCES;
++
++ return EFI_SUCCESS;
++}
++EXPORT_SYMBOL_GPL(efi_query_variable_store);
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 044f5d9..5189fe8 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -129,6 +129,21 @@ static void xen_vcpu_setup(int cpu)
+
+ BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
+
++ /*
++ * This path is called twice on PVHVM - first during bootup via
++ * smp_init -> xen_hvm_cpu_notify, and then if the VCPU is being
++ * hotplugged: cpu_up -> xen_hvm_cpu_notify.
++ * As we can only do the VCPUOP_register_vcpu_info once lets
++ * not over-write its result.
++ *
++ * For PV it is called during restore (xen_vcpu_restore) and bootup
++ * (xen_setup_vcpu_info_placement). The hotplug mechanism does not
++ * use this function.
++ */
++ if (xen_hvm_domain()) {
++ if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
++ return;
++ }
+ if (cpu < MAX_VIRT_CPUS)
+ per_cpu(xen_vcpu,cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
+
+diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
+index f915a7f..b334f54 100644
+--- a/drivers/acpi/acpica/exfldio.c
++++ b/drivers/acpi/acpica/exfldio.c
+@@ -702,7 +702,19 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
+
+ if ((obj_desc->common_field.start_field_bit_offset == 0) &&
+ (obj_desc->common_field.bit_length == access_bit_width)) {
+- status = acpi_ex_field_datum_io(obj_desc, 0, buffer, ACPI_READ);
++ if (buffer_length >= sizeof(u64)) {
++ status =
++ acpi_ex_field_datum_io(obj_desc, 0, buffer,
++ ACPI_READ);
++ } else {
++ /* Use raw_datum (u64) to handle buffers < 64 bits */
++
++ status =
++ acpi_ex_field_datum_io(obj_desc, 0, &raw_datum,
++ ACPI_READ);
++ ACPI_MEMCPY(buffer, &raw_datum, buffer_length);
++ }
++
+ return_ACPI_STATUS(status);
+ }
+
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index d2519b2..51de186 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -217,7 +217,7 @@ static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
+ static int ec_poll(struct acpi_ec *ec)
+ {
+ unsigned long flags;
+- int repeat = 2; /* number of command restarts */
++ int repeat = 5; /* number of command restarts */
+ while (repeat--) {
+ unsigned long delay = jiffies +
+ msecs_to_jiffies(ec_delay);
+@@ -235,8 +235,6 @@ static int ec_poll(struct acpi_ec *ec)
+ }
+ advance_transaction(ec, acpi_ec_read_status(ec));
+ } while (time_before(jiffies, delay));
+- if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF)
+- break;
+ pr_debug(PREFIX "controller reset, restart transaction\n");
+ spin_lock_irqsave(&ec->curr_lock, flags);
+ start_transaction(ec);
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index d9c0199..53e28a9 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -164,6 +164,14 @@ static struct dmi_system_id video_detect_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "UL30VT"),
+ },
+ },
++ {
++ .callback = video_detect_force_vendor,
++ .ident = "Asus UL30A",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
++ },
++ },
+ { },
+ };
+
+diff --git a/drivers/block/brd.c b/drivers/block/brd.c
+index d22119d..968a0d4 100644
+--- a/drivers/block/brd.c
++++ b/drivers/block/brd.c
+@@ -117,13 +117,13 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
+
+ spin_lock(&brd->brd_lock);
+ idx = sector >> PAGE_SECTORS_SHIFT;
++ page->index = idx;
+ if (radix_tree_insert(&brd->brd_pages, idx, page)) {
+ __free_page(page);
+ page = radix_tree_lookup(&brd->brd_pages, idx);
+ BUG_ON(!page);
+ BUG_ON(page->index != idx);
+- } else
+- page->index = idx;
++ }
+ spin_unlock(&brd->brd_lock);
+
+ radix_tree_preload_end();
+diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
+index 43beaca..13cbdd3 100644
+--- a/drivers/block/drbd/drbd_receiver.c
++++ b/drivers/block/drbd/drbd_receiver.c
+@@ -2225,7 +2225,6 @@ static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
+ if (hg == -1 && mdev->state.role == R_PRIMARY) {
+ enum drbd_state_rv rv2;
+
+- drbd_set_role(mdev, R_SECONDARY, 0);
+ /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
+ * we might be here in C_WF_REPORT_PARAMS which is transient.
+ * we do not need to wait for the after state change work either. */
+diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
+index 3ed20e8..92ce302 100644
+--- a/drivers/char/ipmi/ipmi_bt_sm.c
++++ b/drivers/char/ipmi/ipmi_bt_sm.c
+@@ -95,9 +95,9 @@ struct si_sm_data {
+ enum bt_states state;
+ unsigned char seq; /* BT sequence number */
+ struct si_sm_io *io;
+- unsigned char write_data[IPMI_MAX_MSG_LENGTH];
++ unsigned char write_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
+ int write_count;
+- unsigned char read_data[IPMI_MAX_MSG_LENGTH];
++ unsigned char read_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
+ int read_count;
+ int truncated;
+ long timeout; /* microseconds countdown */
+diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
+index 2aa3977..8dde1f5 100644
+--- a/drivers/char/ipmi/ipmi_devintf.c
++++ b/drivers/char/ipmi/ipmi_devintf.c
+@@ -838,13 +838,25 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
+ return ipmi_ioctl(filep, cmd, arg);
+ }
+ }
++
++static long unlocked_compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
++ unsigned long arg)
++{
++ int ret;
++
++ mutex_lock(&ipmi_mutex);
++ ret = compat_ipmi_ioctl(filep, cmd, arg);
++ mutex_unlock(&ipmi_mutex);
++
++ return ret;
++}
+ #endif
+
+ static const struct file_operations ipmi_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = ipmi_unlocked_ioctl,
+ #ifdef CONFIG_COMPAT
+- .compat_ioctl = compat_ipmi_ioctl,
++ .compat_ioctl = unlocked_compat_ipmi_ioctl,
+ #endif
+ .open = ipmi_open,
+ .release = ipmi_release,
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 8ae9235..b651733 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -889,16 +889,24 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
+ if (r->entropy_count / 8 < min + reserved) {
+ nbytes = 0;
+ } else {
++ int entropy_count, orig;
++retry:
++ entropy_count = orig = ACCESS_ONCE(r->entropy_count);
+ /* If limited, never pull more than available */
+- if (r->limit && nbytes + reserved >= r->entropy_count / 8)
+- nbytes = r->entropy_count/8 - reserved;
+-
+- if (r->entropy_count / 8 >= nbytes + reserved)
+- r->entropy_count -= nbytes*8;
+- else
+- r->entropy_count = reserved;
++ if (r->limit && nbytes + reserved >= entropy_count / 8)
++ nbytes = entropy_count/8 - reserved;
++
++ if (entropy_count / 8 >= nbytes + reserved) {
++ entropy_count -= nbytes*8;
++ if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
++ goto retry;
++ } else {
++ entropy_count = reserved;
++ if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
++ goto retry;
++ }
+
+- if (r->entropy_count < random_write_wakeup_thresh) {
++ if (entropy_count < random_write_wakeup_thresh) {
+ wake_up_interruptible(&random_write_wait);
+ kill_fasync(&fasync, SIGIO, POLL_OUT);
+ }
+diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
+index 629c430..49138e7 100644
+--- a/drivers/dma/pch_dma.c
++++ b/drivers/dma/pch_dma.c
+@@ -489,7 +489,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
+ dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);
+
+ if (!ret) {
+- ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO);
++ ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC);
+ if (ret) {
+ spin_lock(&pd_chan->lock);
+ pd_chan->descs_allocated++;
+diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
+index b15c0aa..2a64e69 100644
+--- a/drivers/firmware/efivars.c
++++ b/drivers/firmware/efivars.c
+@@ -425,24 +425,12 @@ static efi_status_t
+ check_var_size_locked(struct efivars *efivars, u32 attributes,
+ unsigned long size)
+ {
+- u64 storage_size, remaining_size, max_size;
+- efi_status_t status;
+ const struct efivar_operations *fops = efivars->ops;
+
+- if (!efivars->ops->query_variable_info)
++ if (!efivars->ops->query_variable_store)
+ return EFI_UNSUPPORTED;
+
+- status = fops->query_variable_info(attributes, &storage_size,
+- &remaining_size, &max_size);
+-
+- if (status != EFI_SUCCESS)
+- return status;
+-
+- if (!storage_size || size > remaining_size || size > max_size ||
+- (remaining_size - size) < (storage_size / 2))
+- return EFI_OUT_OF_RESOURCES;
+-
+- return status;
++ return fops->query_variable_store(attributes, size);
+ }
+
+ static ssize_t
+@@ -1456,7 +1444,7 @@ efivars_init(void)
+ ops.get_variable = efi.get_variable;
+ ops.set_variable = efi.set_variable;
+ ops.get_next_variable = efi.get_next_variable;
+- ops.query_variable_info = efi.query_variable_info;
++ ops.query_variable_store = efi_query_variable_store;
+ error = register_efivars(&__efivars, &ops, efi_kobj);
+ if (error)
+ goto err_put;
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index c77fc67..ca67338 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -1007,50 +1007,56 @@ intel_teardown_mchbar(struct drm_device *dev)
+ release_resource(&dev_priv->mch_res);
+ }
+
+-static unsigned long i915_stolen_to_physical(struct drm_device *dev)
++#define PTE_ADDRESS_MASK 0xfffff000
++#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
++#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
++#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
++#define PTE_MAPPING_TYPE_CACHED (3 << 1)
++#define PTE_MAPPING_TYPE_MASK (3 << 1)
++#define PTE_VALID (1 << 0)
++
++/**
++ * i915_stolen_to_phys - take an offset into stolen memory and turn it into
++ * a physical one
++ * @dev: drm device
++ * @offset: address to translate
++ *
++ * Some chip functions require allocations from stolen space and need the
++ * physical address of the memory in question.
++ */
++static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = dev_priv->bridge_dev;
+ u32 base;
+
++#if 0
+ /* On the machines I have tested the Graphics Base of Stolen Memory
+- * is unreliable, so on those compute the base by subtracting the
+- * stolen memory from the Top of Low Usable DRAM which is where the
+- * BIOS places the graphics stolen memory.
+- *
+- * On gen2, the layout is slightly different with the Graphics Segment
+- * immediately following Top of Memory (or Top of Usable DRAM). Note
+- * it appears that TOUD is only reported by 865g, so we just use the
+- * top of memory as determined by the e820 probe.
+- *
+- * XXX gen2 requires an unavailable symbol and 945gm fails with
+- * its value of TOLUD.
++ * is unreliable, so compute the base by subtracting the stolen memory
++ * from the Top of Low Usable DRAM which is where the BIOS places
++ * the graphics stolen memory.
+ */
+- base = 0;
+- if (INTEL_INFO(dev)->gen >= 6) {
+- /* Read Base Data of Stolen Memory Register (BDSM) directly.
+- * Note that there is also a MCHBAR miror at 0x1080c0 or
+- * we could use device 2:0x5c instead.
+- */
+- pci_read_config_dword(pdev, 0xB0, &base);
+- base &= ~4095; /* lower bits used for locking register */
+- } else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+- /* Read Graphics Base of Stolen Memory directly */
++ if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
++ /* top 32bits are reserved = 0 */
+ pci_read_config_dword(pdev, 0xA4, &base);
+-#if 0
+- } else if (IS_GEN3(dev)) {
++ } else {
++ /* XXX presume 8xx is the same as i915 */
++ pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
++ }
++#else
++ if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
++ u16 val;
++ pci_read_config_word(pdev, 0xb0, &val);
++ base = val >> 4 << 20;
++ } else {
+ u8 val;
+- /* Stolen is immediately below Top of Low Usable DRAM */
+ pci_read_config_byte(pdev, 0x9c, &val);
+ base = val >> 3 << 27;
+- base -= dev_priv->mm.gtt->stolen_size;
+- } else {
+- /* Stolen is immediately above Top of Memory */
+- base = max_low_pfn_mapped << PAGE_SHIFT;
+-#endif
+ }
++ base -= dev_priv->mm.gtt->stolen_size;
++#endif
+
+- return base;
++ return base + offset;
+ }
+
+ static void i915_warn_stolen(struct drm_device *dev)
+@@ -1075,7 +1081,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
+ if (!compressed_fb)
+ goto err;
+
+- cfb_base = dev_priv->mm.stolen_base + compressed_fb->start;
++ cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
+ if (!cfb_base)
+ goto err_fb;
+
+@@ -1088,7 +1094,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
+ if (!compressed_llb)
+ goto err_fb;
+
+- ll_base = dev_priv->mm.stolen_base + compressed_llb->start;
++ ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
+ if (!ll_base)
+ goto err_llb;
+ }
+@@ -1107,7 +1113,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
+ }
+
+ DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
+- (long)cfb_base, (long)ll_base, size >> 20);
++ cfb_base, ll_base, size >> 20);
+ return;
+
+ err_llb:
+@@ -1181,13 +1187,6 @@ static int i915_load_gem_init(struct drm_device *dev)
+ gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
+ mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
+- dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
+- if (dev_priv->mm.stolen_base == 0)
+- return 0;
+-
+- DRM_DEBUG_KMS("found %d bytes of stolen memory at %08lx\n",
+- dev_priv->mm.gtt->stolen_size, dev_priv->mm.stolen_base);
+-
+ /* Basic memrange allocator for stolen space */
+ drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
+
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 20cd295..144d37c 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -581,7 +581,6 @@ typedef struct drm_i915_private {
+ unsigned long gtt_start;
+ unsigned long gtt_mappable_end;
+ unsigned long gtt_end;
+- unsigned long stolen_base; /* limited to low memory (32-bit) */
+
+ struct io_mapping *gtt_mapping;
+ int gtt_mtrr;
+diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
+index 1fe98b4..9aa02be 100644
+--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
++++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
+@@ -74,7 +74,7 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
+ OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));
+
+ for (i = 0; i < nr; ++i) {
+- if (DRM_COPY_FROM_USER_UNCHECKED
++ if (DRM_COPY_FROM_USER
+ (&box, &cmdbuf->boxes[n + i], sizeof(box))) {
+ DRM_ERROR("copy cliprect faulted\n");
+ return -EFAULT;
+diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
+index 65a35cf..61ab615 100644
+--- a/drivers/hwmon/abituguru.c
++++ b/drivers/hwmon/abituguru.c
+@@ -1280,14 +1280,18 @@ static int __devinit abituguru_probe(struct platform_device *pdev)
+ pr_info("found Abit uGuru\n");
+
+ /* Register sysfs hooks */
+- for (i = 0; i < sysfs_attr_i; i++)
+- if (device_create_file(&pdev->dev,
+- &data->sysfs_attr[i].dev_attr))
++ for (i = 0; i < sysfs_attr_i; i++) {
++ res = device_create_file(&pdev->dev,
++ &data->sysfs_attr[i].dev_attr);
++ if (res)
+ goto abituguru_probe_error;
+- for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++)
+- if (device_create_file(&pdev->dev,
+- &abituguru_sysfs_attr[i].dev_attr))
++ }
++ for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++) {
++ res = device_create_file(&pdev->dev,
++ &abituguru_sysfs_attr[i].dev_attr);
++ if (res)
+ goto abituguru_probe_error;
++ }
+
+ data->hwmon_dev = hwmon_device_register(&pdev->dev);
+ if (!IS_ERR(data->hwmon_dev))
+diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
+index 6193349..3c2812f 100644
+--- a/drivers/i2c/busses/i2c-designware-core.c
++++ b/drivers/i2c/busses/i2c-designware-core.c
+@@ -349,7 +349,8 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
+ /* Enable the adapter */
+ dw_writel(dev, 1, DW_IC_ENABLE);
+
+- /* Enable interrupts */
++ /* Clear and enable interrupts */
++ i2c_dw_clear_int(dev);
+ dw_writel(dev, DW_IC_INTR_DEFAULT_MASK, DW_IC_INTR_MASK);
+ }
+
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index 0a6806f..a5dfcc0 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -322,6 +322,9 @@ static void __cache_size_refresh(void)
+ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
+ enum data_mode *data_mode)
+ {
++ unsigned noio_flag;
++ void *ptr;
++
+ if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
+ *data_mode = DATA_MODE_SLAB;
+ return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
+@@ -335,7 +338,28 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
+ }
+
+ *data_mode = DATA_MODE_VMALLOC;
+- return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
++
++ /*
++ * __vmalloc allocates the data pages and auxiliary structures with
++ * gfp_flags that were specified, but pagetables are always allocated
++ * with GFP_KERNEL, no matter what was specified as gfp_mask.
++ *
++ * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
++ * all allocations done by this process (including pagetables) are done
++ * as if GFP_NOIO was specified.
++ */
++
++ if (gfp_mask & __GFP_NORETRY) {
++ noio_flag = current->flags & PF_MEMALLOC;
++ current->flags |= PF_MEMALLOC;
++ }
++
++ ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
++
++ if (gfp_mask & __GFP_NORETRY)
++ current->flags = (current->flags & ~PF_MEMALLOC) | noio_flag;
++
++ return ptr;
+ }
+
+ /*
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index 34ec2b5..b4aaa7b 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -1117,6 +1117,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
+ if (!s->pending_pool) {
+ ti->error = "Could not allocate mempool for pending exceptions";
++ r = -ENOMEM;
+ goto bad_pending_pool;
+ }
+
+diff --git a/drivers/media/dvb/mantis/mantis_dvb.c b/drivers/media/dvb/mantis/mantis_dvb.c
+index e5180e4..5d15c6b 100644
+--- a/drivers/media/dvb/mantis/mantis_dvb.c
++++ b/drivers/media/dvb/mantis/mantis_dvb.c
+@@ -248,8 +248,10 @@ int __devinit mantis_dvb_init(struct mantis_pci *mantis)
+ err5:
+ tasklet_kill(&mantis->tasklet);
+ dvb_net_release(&mantis->dvbnet);
+- dvb_unregister_frontend(mantis->fe);
+- dvb_frontend_detach(mantis->fe);
++ if (mantis->fe) {
++ dvb_unregister_frontend(mantis->fe);
++ dvb_frontend_detach(mantis->fe);
++ }
+ err4:
+ mantis->demux.dmx.remove_frontend(&mantis->demux.dmx, &mantis->fe_mem);
+
+diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c
+index 8d816cc..105f820 100644
+--- a/drivers/mfd/adp5520.c
++++ b/drivers/mfd/adp5520.c
+@@ -36,6 +36,7 @@ struct adp5520_chip {
+ struct blocking_notifier_head notifier_list;
+ int irq;
+ unsigned long id;
++ uint8_t mode;
+ };
+
+ static int __adp5520_read(struct i2c_client *client,
+@@ -326,7 +327,10 @@ static int adp5520_suspend(struct device *dev)
+ struct i2c_client *client = to_i2c_client(dev);
+ struct adp5520_chip *chip = dev_get_drvdata(&client->dev);
+
+- adp5520_clr_bits(chip->dev, ADP5520_MODE_STATUS, ADP5520_nSTNBY);
++ adp5520_read(chip->dev, ADP5520_MODE_STATUS, &chip->mode);
++ /* All other bits are W1C */
++ chip->mode &= ADP5520_BL_EN | ADP5520_DIM_EN | ADP5520_nSTNBY;
++ adp5520_write(chip->dev, ADP5520_MODE_STATUS, 0);
+ return 0;
+ }
+
+@@ -335,7 +339,7 @@ static int adp5520_resume(struct device *dev)
+ struct i2c_client *client = to_i2c_client(dev);
+ struct adp5520_chip *chip = dev_get_drvdata(&client->dev);
+
+- adp5520_set_bits(chip->dev, ADP5520_MODE_STATUS, ADP5520_nSTNBY);
++ adp5520_write(chip->dev, ADP5520_MODE_STATUS, chip->mode);
+ return 0;
+ }
+ #endif
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index fb7c27f..c1aec06 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -363,13 +363,13 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
+ ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
+ card->ext_csd.raw_trim_mult =
+ ext_csd[EXT_CSD_TRIM_MULT];
++ card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
+ if (card->ext_csd.rev >= 4) {
+ /*
+ * Enhanced area feature support -- check whether the eMMC
+ * card has the Enhanced area enabled. If so, export enhanced
+ * area offset and size to user by adding sysfs interface.
+ */
+- card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
+ if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
+ (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
+ hc_erase_grp_sz =
+diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
+index cf444b0..90233ad 100644
+--- a/drivers/mmc/host/Kconfig
++++ b/drivers/mmc/host/Kconfig
+@@ -297,16 +297,6 @@ config MMC_ATMELMCI
+
+ endchoice
+
+-config MMC_ATMELMCI_DMA
+- bool "Atmel MCI DMA support"
+- depends on MMC_ATMELMCI && (AVR32 || ARCH_AT91SAM9G45) && DMA_ENGINE
+- help
+- Say Y here to have the Atmel MCI driver use a DMA engine to
+- do data transfers and thus increase the throughput and
+- reduce the CPU utilization.
+-
+- If unsure, say N.
+-
+ config MMC_IMX
+ tristate "Motorola i.MX Multimedia Card Interface support"
+ depends on ARCH_MX1
+diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
+index 0932024..83790f2 100644
+--- a/drivers/mmc/host/atmel-mci.c
++++ b/drivers/mmc/host/atmel-mci.c
+@@ -163,6 +163,7 @@ struct atmel_mci {
+ void __iomem *regs;
+
+ struct scatterlist *sg;
++ unsigned int sg_len;
+ unsigned int pio_offset;
+
+ struct atmel_mci_slot *cur_slot;
+@@ -751,6 +752,7 @@ static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
+ data->error = -EINPROGRESS;
+
+ host->sg = data->sg;
++ host->sg_len = data->sg_len;
+ host->data = data;
+ host->data_chan = NULL;
+
+@@ -1573,7 +1575,8 @@ static void atmci_read_data_pio(struct atmel_mci *host)
+ if (offset == sg->length) {
+ flush_dcache_page(sg_page(sg));
+ host->sg = sg = sg_next(sg);
+- if (!sg)
++ host->sg_len--;
++ if (!sg || !host->sg_len)
+ goto done;
+
+ offset = 0;
+@@ -1586,7 +1589,8 @@ static void atmci_read_data_pio(struct atmel_mci *host)
+
+ flush_dcache_page(sg_page(sg));
+ host->sg = sg = sg_next(sg);
+- if (!sg)
++ host->sg_len--;
++ if (!sg || !host->sg_len)
+ goto done;
+
+ offset = 4 - remaining;
+@@ -1640,7 +1644,8 @@ static void atmci_write_data_pio(struct atmel_mci *host)
+ nbytes += 4;
+ if (offset == sg->length) {
+ host->sg = sg = sg_next(sg);
+- if (!sg)
++ host->sg_len--;
++ if (!sg || !host->sg_len)
+ goto done;
+
+ offset = 0;
+@@ -1654,7 +1659,8 @@ static void atmci_write_data_pio(struct atmel_mci *host)
+ nbytes += remaining;
+
+ host->sg = sg = sg_next(sg);
+- if (!sg) {
++ host->sg_len--;
++ if (!sg || !host->sg_len) {
+ atmci_writel(host, ATMCI_TDR, value);
+ goto done;
+ }
+@@ -2167,10 +2173,8 @@ static int __exit atmci_remove(struct platform_device *pdev)
+ atmci_readl(host, ATMCI_SR);
+ clk_disable(host->mck);
+
+-#ifdef CONFIG_MMC_ATMELMCI_DMA
+ if (host->dma.chan)
+ dma_release_channel(host->dma.chan);
+-#endif
+
+ free_irq(platform_get_irq(pdev, 0), host);
+ iounmap(host->regs);
+diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
+index 92053e6..c15d6ce 100644
+--- a/drivers/net/ethernet/3com/3c509.c
++++ b/drivers/net/ethernet/3com/3c509.c
+@@ -309,6 +309,7 @@ static int __devinit el3_isa_match(struct device *pdev,
+ if (!dev)
+ return -ENOMEM;
+
++ SET_NETDEV_DEV(dev, pdev);
+ netdev_boot_setup_check(dev);
+
+ if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509-isa")) {
+@@ -704,6 +705,7 @@ static int __init el3_eisa_probe (struct device *device)
+ return -ENOMEM;
+ }
+
++ SET_NETDEV_DEV(dev, device);
+ netdev_boot_setup_check(dev);
+
+ el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_EISA);
+diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
+index e0c5529..efc9dee 100644
+--- a/drivers/net/ethernet/3com/3c59x.c
++++ b/drivers/net/ethernet/3com/3c59x.c
+@@ -632,7 +632,6 @@ struct vortex_private {
+ pm_state_valid:1, /* pci_dev->saved_config_space has sane contents */
+ open:1,
+ medialock:1,
+- must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */
+ large_frames:1, /* accept large frames */
+ handling_irq:1; /* private in_irq indicator */
+ /* {get|set}_wol operations are already serialized by rtnl.
+@@ -951,7 +950,7 @@ static int __devexit vortex_eisa_remove(struct device *device)
+
+ unregister_netdev(dev);
+ iowrite16(TotalReset|0x14, ioaddr + EL3_CMD);
+- release_region(dev->base_addr, VORTEX_TOTAL_SIZE);
++ release_region(edev->base_addr, VORTEX_TOTAL_SIZE);
+
+ free_netdev(dev);
+ return 0;
+@@ -1012,6 +1011,12 @@ static int __devinit vortex_init_one(struct pci_dev *pdev,
+ if (rc < 0)
+ goto out;
+
++ rc = pci_request_regions(pdev, DRV_NAME);
++ if (rc < 0) {
++ pci_disable_device(pdev);
++ goto out;
++ }
++
+ unit = vortex_cards_found;
+
+ if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) {
+@@ -1027,6 +1032,7 @@ static int __devinit vortex_init_one(struct pci_dev *pdev,
+ if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */
+ ioaddr = pci_iomap(pdev, 0, 0);
+ if (!ioaddr) {
++ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ rc = -ENOMEM;
+ goto out;
+@@ -1036,6 +1042,7 @@ static int __devinit vortex_init_one(struct pci_dev *pdev,
+ ent->driver_data, unit);
+ if (rc < 0) {
+ pci_iounmap(pdev, ioaddr);
++ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ goto out;
+ }
+@@ -1180,11 +1187,6 @@ static int __devinit vortex_probe1(struct device *gendev,
+
+ /* PCI-only startup logic */
+ if (pdev) {
+- /* EISA resources already marked, so only PCI needs to do this here */
+- /* Ignore return value, because Cardbus drivers already allocate for us */
+- if (request_region(dev->base_addr, vci->io_size, print_name) != NULL)
+- vp->must_free_region = 1;
+-
+ /* enable bus-mastering if necessary */
+ if (vci->flags & PCI_USES_MASTER)
+ pci_set_master(pdev);
+@@ -1222,7 +1224,7 @@ static int __devinit vortex_probe1(struct device *gendev,
+ &vp->rx_ring_dma);
+ retval = -ENOMEM;
+ if (!vp->rx_ring)
+- goto free_region;
++ goto free_device;
+
+ vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
+ vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;
+@@ -1487,9 +1489,7 @@ free_ring:
+ + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+ vp->rx_ring,
+ vp->rx_ring_dma);
+-free_region:
+- if (vp->must_free_region)
+- release_region(dev->base_addr, vci->io_size);
++free_device:
+ free_netdev(dev);
+ pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval);
+ out:
+@@ -3254,8 +3254,9 @@ static void __devexit vortex_remove_one(struct pci_dev *pdev)
+ + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+ vp->rx_ring,
+ vp->rx_ring_dma);
+- if (vp->must_free_region)
+- release_region(dev->base_addr, vp->io_size);
++
++ pci_release_regions(pdev);
++
+ free_netdev(dev);
+ }
+
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index c6b9903..ec13a59 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -2752,6 +2752,31 @@ static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
+ static int tg3_setup_phy(struct tg3 *, int);
+ static int tg3_halt_cpu(struct tg3 *, u32);
+
++static bool tg3_phy_power_bug(struct tg3 *tp)
++{
++ switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
++ case ASIC_REV_5700:
++ case ASIC_REV_5704:
++ return true;
++ case ASIC_REV_5780:
++ if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
++ return true;
++ return false;
++ case ASIC_REV_5717:
++ if (!tp->pci_fn)
++ return true;
++ return false;
++ case ASIC_REV_5719:
++ case ASIC_REV_5720:
++ if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
++ !tp->pci_fn)
++ return true;
++ return false;
++ }
++
++ return false;
++}
++
+ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
+ {
+ u32 val;
+@@ -2808,12 +2833,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
+ /* The PHY should not be powered down on some chips because
+ * of bugs.
+ */
+- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
+- (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
+- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+- !tp->pci_fn))
++ if (tg3_phy_power_bug(tp))
+ return;
+
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
+diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
+index 021463b..57e2da0 100644
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -1325,7 +1325,7 @@ static const struct net_device_ops ibmveth_netdev_ops = {
+ static int __devinit ibmveth_probe(struct vio_dev *dev,
+ const struct vio_device_id *id)
+ {
+- int rc, i;
++ int rc, i, mac_len;
+ struct net_device *netdev;
+ struct ibmveth_adapter *adapter;
+ unsigned char *mac_addr_p;
+@@ -1335,11 +1335,19 @@ static int __devinit ibmveth_probe(struct vio_dev *dev,
+ dev->unit_address);
+
+ mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
+- NULL);
++ &mac_len);
+ if (!mac_addr_p) {
+ dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
+ return -EINVAL;
+ }
++ /* Workaround for old/broken pHyp */
++ if (mac_len == 8)
++ mac_addr_p += 2;
++ else if (mac_len != 6) {
++ dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n",
++ mac_len);
++ return -EINVAL;
++ }
+
+ mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
+ VETH_MCAST_FILTER_SIZE, NULL);
+@@ -1364,17 +1372,6 @@ static int __devinit ibmveth_probe(struct vio_dev *dev,
+
+ netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
+
+- /*
+- * Some older boxes running PHYP non-natively have an OF that returns
+- * a 8-byte local-mac-address field (and the first 2 bytes have to be
+- * ignored) while newer boxes' OF return a 6-byte field. Note that
+- * IEEE 1275 specifies that local-mac-address must be a 6-byte field.
+- * The RPA doc specifies that the first byte must be 10b, so we'll
+- * just look for it to solve this 8 vs. 6 byte field issue
+- */
+- if ((*mac_addr_p & 0x3) != 0x02)
+- mac_addr_p += 2;
+-
+ adapter->mac_addr = 0;
+ memcpy(&adapter->mac_addr, mac_addr_p, 6);
+
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index 544ac06..301b39e 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -204,7 +204,8 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
+ }
+
+ if (port->passthru)
+- vlan = list_first_entry(&port->vlans, struct macvlan_dev, list);
++ vlan = list_first_or_null_rcu(&port->vlans,
++ struct macvlan_dev, list);
+ else
+ vlan = macvlan_hash_lookup(port, eth->h_dest);
+ if (vlan == NULL)
+@@ -725,7 +726,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
+ if (err < 0)
+ goto destroy_port;
+
+- list_add_tail(&vlan->list, &port->vlans);
++ list_add_tail_rcu(&vlan->list, &port->vlans);
+ netif_stacked_transfer_operstate(lowerdev, dev);
+
+ return 0;
+@@ -751,7 +752,7 @@ void macvlan_dellink(struct net_device *dev, struct list_head *head)
+ {
+ struct macvlan_dev *vlan = netdev_priv(dev);
+
+- list_del(&vlan->list);
++ list_del_rcu(&vlan->list);
+ unregister_netdevice_queue(dev, head);
+ }
+ EXPORT_SYMBOL_GPL(macvlan_dellink);
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index 95437fc..df3e27c 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -1793,6 +1793,7 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_node *an = (struct ath_node *) sta->drv_priv;
+ struct ieee80211_key_conf ps_key = { };
++ int key;
+
+ ath_node_attach(sc, sta);
+
+@@ -1800,7 +1801,9 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
+ vif->type != NL80211_IFTYPE_AP_VLAN)
+ return 0;
+
+- an->ps_key = ath_key_config(common, vif, sta, &ps_key);
++ key = ath_key_config(common, vif, sta, &ps_key);
++ if (key > 0)
++ an->ps_key = key;
+
+ return 0;
+ }
+@@ -1817,6 +1820,7 @@ static void ath9k_del_ps_key(struct ath_softc *sc,
+ return;
+
+ ath_key_delete(common, &ps_key);
++ an->ps_key = 0;
+ }
+
+ static int ath9k_sta_remove(struct ieee80211_hw *hw,
+diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
+index 12975ad..ca70267 100644
+--- a/drivers/net/wireless/b43/dma.c
++++ b/drivers/net/wireless/b43/dma.c
+@@ -1719,6 +1719,25 @@ drop_recycle_buffer:
+ sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
+ }
+
++void b43_dma_handle_rx_overflow(struct b43_dmaring *ring)
++{
++ int current_slot, previous_slot;
++
++ B43_WARN_ON(ring->tx);
++
++ /* Device has filled all buffers, drop all packets and let TCP
++ * decrease speed.
++ * Decrement RX index by one will let the device to see all slots
++ * as free again
++ */
++ /*
++ *TODO: How to increase rx_drop in mac80211?
++ */
++ current_slot = ring->ops->get_current_rxslot(ring);
++ previous_slot = prev_slot(ring, current_slot);
++ ring->ops->set_current_rxslot(ring, previous_slot);
++}
++
+ void b43_dma_rx(struct b43_dmaring *ring)
+ {
+ const struct b43_dma_ops *ops = ring->ops;
+diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
+index 9fdd198..df8c8cd 100644
+--- a/drivers/net/wireless/b43/dma.h
++++ b/drivers/net/wireless/b43/dma.h
+@@ -9,7 +9,7 @@
+ /* DMA-Interrupt reasons. */
+ #define B43_DMAIRQ_FATALMASK ((1 << 10) | (1 << 11) | (1 << 12) \
+ | (1 << 14) | (1 << 15))
+-#define B43_DMAIRQ_NONFATALMASK (1 << 13)
++#define B43_DMAIRQ_RDESC_UFLOW (1 << 13)
+ #define B43_DMAIRQ_RX_DONE (1 << 16)
+
+ /*** 32-bit DMA Engine. ***/
+@@ -295,6 +295,8 @@ int b43_dma_tx(struct b43_wldev *dev,
+ void b43_dma_handle_txstatus(struct b43_wldev *dev,
+ const struct b43_txstatus *status);
+
++void b43_dma_handle_rx_overflow(struct b43_dmaring *ring);
++
+ void b43_dma_rx(struct b43_dmaring *ring);
+
+ void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
+diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
+index 680709c..c0f2041 100644
+--- a/drivers/net/wireless/b43/main.c
++++ b/drivers/net/wireless/b43/main.c
+@@ -1901,30 +1901,18 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
+ }
+ }
+
+- if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK |
+- B43_DMAIRQ_NONFATALMASK))) {
+- if (merged_dma_reason & B43_DMAIRQ_FATALMASK) {
+- b43err(dev->wl, "Fatal DMA error: "
+- "0x%08X, 0x%08X, 0x%08X, "
+- "0x%08X, 0x%08X, 0x%08X\n",
+- dma_reason[0], dma_reason[1],
+- dma_reason[2], dma_reason[3],
+- dma_reason[4], dma_reason[5]);
+- b43err(dev->wl, "This device does not support DMA "
++ if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK))) {
++ b43err(dev->wl,
++ "Fatal DMA error: 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X\n",
++ dma_reason[0], dma_reason[1],
++ dma_reason[2], dma_reason[3],
++ dma_reason[4], dma_reason[5]);
++ b43err(dev->wl, "This device does not support DMA "
+ "on your system. It will now be switched to PIO.\n");
+- /* Fall back to PIO transfers if we get fatal DMA errors! */
+- dev->use_pio = 1;
+- b43_controller_restart(dev, "DMA error");
+- return;
+- }
+- if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) {
+- b43err(dev->wl, "DMA error: "
+- "0x%08X, 0x%08X, 0x%08X, "
+- "0x%08X, 0x%08X, 0x%08X\n",
+- dma_reason[0], dma_reason[1],
+- dma_reason[2], dma_reason[3],
+- dma_reason[4], dma_reason[5]);
+- }
++ /* Fall back to PIO transfers if we get fatal DMA errors! */
++ dev->use_pio = true;
++ b43_controller_restart(dev, "DMA error");
++ return;
+ }
+
+ if (unlikely(reason & B43_IRQ_UCODE_DEBUG))
+@@ -1943,6 +1931,11 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
+ handle_irq_noise(dev);
+
+ /* Check the DMA reason registers for received data. */
++ if (dma_reason[0] & B43_DMAIRQ_RDESC_UFLOW) {
++ if (B43_DEBUG)
++ b43warn(dev->wl, "RX descriptor underrun\n");
++ b43_dma_handle_rx_overflow(dev->dma.rx_ring);
++ }
+ if (dma_reason[0] & B43_DMAIRQ_RX_DONE) {
+ if (b43_using_pio_transfers(dev))
+ b43_pio_rx(dev->pio.rx_queue);
+@@ -2000,7 +1993,7 @@ static irqreturn_t b43_do_interrupt(struct b43_wldev *dev)
+ return IRQ_NONE;
+
+ dev->dma_reason[0] = b43_read32(dev, B43_MMIO_DMA0_REASON)
+- & 0x0001DC00;
++ & 0x0001FC00;
+ dev->dma_reason[1] = b43_read32(dev, B43_MMIO_DMA1_REASON)
+ & 0x0000DC00;
+ dev->dma_reason[2] = b43_read32(dev, B43_MMIO_DMA2_REASON)
+@@ -3103,7 +3096,7 @@ static int b43_chip_init(struct b43_wldev *dev)
+ b43_write32(dev, 0x018C, 0x02000000);
+ }
+ b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, 0x00004000);
+- b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001DC00);
++ b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001FC00);
+ b43_write32(dev, B43_MMIO_DMA1_IRQ_MASK, 0x0000DC00);
+ b43_write32(dev, B43_MMIO_DMA2_IRQ_MASK, 0x0000DC00);
+ b43_write32(dev, B43_MMIO_DMA3_IRQ_MASK, 0x0001DC00);
+diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
+index 727c129..45ac407 100644
+--- a/drivers/net/wireless/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/mwifiex/cfg80211.c
+@@ -1281,9 +1281,6 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev)
+ if (dev->reg_state == NETREG_REGISTERED)
+ unregister_netdevice(dev);
+
+- if (dev->reg_state == NETREG_UNREGISTERED)
+- free_netdev(dev);
+-
+ /* Clear the priv in adapter */
+ priv->netdev = NULL;
+
+diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
+index 5a25dd2..90ffc76 100644
+--- a/drivers/net/wireless/mwifiex/cmdevt.c
++++ b/drivers/net/wireless/mwifiex/cmdevt.c
+@@ -1083,6 +1083,7 @@ mwifiex_process_hs_config(struct mwifiex_adapter *adapter)
+ adapter->if_ops.wakeup(adapter);
+ adapter->hs_activated = false;
+ adapter->is_hs_configured = false;
++ adapter->is_suspended = false;
+ mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
+ MWIFIEX_BSS_ROLE_ANY), false);
+ }
+diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
+index 67e6db7..5baa12a 100644
+--- a/drivers/net/wireless/mwifiex/main.c
++++ b/drivers/net/wireless/mwifiex/main.c
+@@ -581,6 +581,7 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv,
+ struct net_device *dev)
+ {
+ dev->netdev_ops = &mwifiex_netdev_ops;
++ dev->destructor = free_netdev;
+ /* Initialize private structure */
+ priv->current_key_index = 0;
+ priv->media_connected = false;
+diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
+index 56e1c4a..5c3c62d 100644
+--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
++++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
+@@ -105,7 +105,7 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
+ } else {
+ /* Multicast */
+ priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_PROMISCUOUS_ENABLE;
+- if (mcast_list->mode == MWIFIEX_MULTICAST_MODE) {
++ if (mcast_list->mode == MWIFIEX_ALL_MULTI_MODE) {
+ dev_dbg(priv->adapter->dev,
+ "info: Enabling All Multicast!\n");
+ priv->curr_pkt_filter |=
+@@ -117,20 +117,11 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
+ dev_dbg(priv->adapter->dev,
+ "info: Set multicast list=%d\n",
+ mcast_list->num_multicast_addr);
+- /* Set multicast addresses to firmware */
+- if (old_pkt_filter == priv->curr_pkt_filter) {
+- /* Send request to firmware */
+- ret = mwifiex_send_cmd_async(priv,
+- HostCmd_CMD_MAC_MULTICAST_ADR,
+- HostCmd_ACT_GEN_SET, 0,
+- mcast_list);
+- } else {
+- /* Send request to firmware */
+- ret = mwifiex_send_cmd_async(priv,
+- HostCmd_CMD_MAC_MULTICAST_ADR,
+- HostCmd_ACT_GEN_SET, 0,
+- mcast_list);
+- }
++ /* Send multicast addresses to firmware */
++ ret = mwifiex_send_cmd_async(priv,
++ HostCmd_CMD_MAC_MULTICAST_ADR,
++ HostCmd_ACT_GEN_SET, 0,
++ mcast_list);
+ }
+ }
+ }
+diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
+index 22b2dfa..fdacfce 100644
+--- a/drivers/platform/x86/hp_accel.c
++++ b/drivers/platform/x86/hp_accel.c
+@@ -362,7 +362,8 @@ static int lis3lv02d_suspend(struct acpi_device *device, pm_message_t state)
+
+ static int lis3lv02d_resume(struct acpi_device *device)
+ {
+- return lis3lv02d_poweron(&lis3_dev);
++ lis3lv02d_poweron(&lis3_dev);
++ return 0;
+ }
+ #else
+ #define lis3lv02d_suspend NULL
+diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
+index 33471e1..23ef16c 100644
+--- a/drivers/rapidio/devices/tsi721.c
++++ b/drivers/rapidio/devices/tsi721.c
+@@ -475,6 +475,10 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
+ u32 intval;
+ u32 ch_inte;
+
++ /* For MSI mode disable all device-level interrupts */
++ if (priv->flags & TSI721_USING_MSI)
++ iowrite32(0, priv->regs + TSI721_DEV_INTE);
++
+ dev_int = ioread32(priv->regs + TSI721_DEV_INT);
+ if (!dev_int)
+ return IRQ_NONE;
+@@ -548,6 +552,13 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
+ tsi721_pw_handler(mport);
+ }
+
++ /* For MSI mode re-enable device-level interrupts */
++ if (priv->flags & TSI721_USING_MSI) {
++ dev_int = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
++ TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH;
++ iowrite32(dev_int, priv->regs + TSI721_DEV_INTE);
++ }
++
+ return IRQ_HANDLED;
+ }
+
+diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
+index 2ee3bbf..62e1b2c 100644
+--- a/drivers/rtc/rtc-pcf2123.c
++++ b/drivers/rtc/rtc-pcf2123.c
+@@ -264,6 +264,7 @@ static int __devinit pcf2123_probe(struct spi_device *spi)
+
+ if (!(rxbuf[0] & 0x20)) {
+ dev_err(&spi->dev, "chip not found\n");
++ ret = -ENODEV;
+ goto kfree_exit;
+ }
+
+diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
+index a023f52..fe4dbf3 100644
+--- a/drivers/staging/comedi/comedi_fops.c
++++ b/drivers/staging/comedi/comedi_fops.c
+@@ -143,6 +143,9 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
+ }
+ rc = do_devconfig_ioctl(dev,
+ (struct comedi_devconfig __user *)arg);
++ if (rc == 0)
++ /* Evade comedi_auto_unconfig(). */
++ dev_file_info->hardware_device = NULL;
+ goto done;
+ }
+
+diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
+index 51b5adf..df8ea25 100644
+--- a/drivers/staging/vt6656/hostap.c
++++ b/drivers/staging/vt6656/hostap.c
+@@ -153,7 +153,7 @@ static int hostap_disable_hostapd(PSDevice pDevice, int rtnl_locked)
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n",
+ pDevice->dev->name, pDevice->apdev->name);
+ }
+- kfree(pDevice->apdev);
++ free_netdev(pDevice->apdev);
+ pDevice->apdev = NULL;
+ pDevice->bEnable8021x = FALSE;
+ pDevice->bEnableHostWEP = FALSE;
+diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
+index 101b1be..d86791e 100644
+--- a/drivers/target/iscsi/iscsi_target_erl1.c
++++ b/drivers/target/iscsi/iscsi_target_erl1.c
+@@ -824,7 +824,7 @@ static int iscsit_attach_ooo_cmdsn(
+ /*
+ * CmdSN is greater than the tail of the list.
+ */
+- if (ooo_tail->cmdsn < ooo_cmdsn->cmdsn)
++ if (iscsi_sna_lt(ooo_tail->cmdsn, ooo_cmdsn->cmdsn))
+ list_add_tail(&ooo_cmdsn->ooo_list,
+ &sess->sess_ooo_cmdsn_list);
+ else {
+@@ -834,11 +834,12 @@ static int iscsit_attach_ooo_cmdsn(
+ */
+ list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list,
+ ooo_list) {
+- if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn)
++ if (iscsi_sna_lt(ooo_tmp->cmdsn, ooo_cmdsn->cmdsn))
+ continue;
+
++ /* Insert before this entry */
+ list_add(&ooo_cmdsn->ooo_list,
+- &ooo_tmp->ooo_list);
++ ooo_tmp->ooo_list.prev);
+ break;
+ }
+ }
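
The two hunks above replace plain "<" comparisons of CmdSN values with iscsi_sna_lt(), a wraparound-safe serial-number comparison. As a sketch of the principle only (not the kernel's actual helper, which lives in the iSCSI target code), a stand-alone version assuming 32-bit sequence numbers:

/*
 * Illustrative sketch only: a wraparound-safe "serial number less than"
 * in the spirit of iscsi_sna_lt().
 */
#include <stdint.h>
#include <stdio.h>

static int sna_lt(uint32_t a, uint32_t b)
{
    /* Interpreting the difference as signed survives wrap at 2^32. */
    return (int32_t)(a - b) < 0;
}

int main(void)
{
    printf("%d\n", sna_lt(5, 10));            /* 1: 5 is older than 10            */
    printf("%d\n", sna_lt(0xfffffffeU, 3));   /* 1: 3 comes "after" the wrap      */
    printf("%d\n", 0xfffffffeU < 3U);         /* 0: a naive compare gets it wrong */
    return 0;
}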
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index 8481aae..0f8a785 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -1530,6 +1530,14 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
+ tty->real_raw = 0;
+ }
+ n_tty_set_room(tty);
++ /*
++	 * Fix a tty hang when I_IXON(tty) is cleared but the tty
++	 * has already been stopped by STOP_CHAR(tty) before that.
++ */
++ if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) {
++ start_tty(tty);
++ }
++
+ /* The termios change make the tty ready for I/O */
+ wake_up_interruptible(&tty->write_wait);
+ wake_up_interruptible(&tty->read_wait);
+diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
+index a845f8b..9497171 100644
+--- a/drivers/usb/atm/cxacru.c
++++ b/drivers/usb/atm/cxacru.c
+@@ -686,7 +686,8 @@ static int cxacru_cm_get_array(struct cxacru_data *instance, enum cxacru_cm_requ
+ {
+ int ret, len;
+ __le32 *buf;
+- int offb, offd;
++ int offb;
++ unsigned int offd;
+ const int stride = CMD_PACKET_SIZE / (4 * 2) - 1;
+ int buflen = ((size - 1) / stride + 1 + size * 2) * 4;
+
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 0aaa4f1..2fbcb75 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -88,6 +88,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ /* Edirol SD-20 */
+ { USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME },
+
++ /* Alcor Micro Corp. Hub */
++ { USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME },
++
+ /* appletouch */
+ { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
+
+diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
+index 045cde4..850723f 100644
+--- a/drivers/usb/host/uhci-hub.c
++++ b/drivers/usb/host/uhci-hub.c
+@@ -221,7 +221,8 @@ static int uhci_hub_status_data(struct usb_hcd *hcd, char *buf)
+ /* auto-stop if nothing connected for 1 second */
+ if (any_ports_active(uhci))
+ uhci->rh_state = UHCI_RH_RUNNING;
+- else if (time_after_eq(jiffies, uhci->auto_stop_time))
++ else if (time_after_eq(jiffies, uhci->auto_stop_time) &&
++ !uhci->wait_for_hp)
+ suspend_rh(uhci, UHCI_RH_AUTO_STOPPED);
+ break;
+
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index ee5ec11..430c1d5 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1353,15 +1353,17 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
+ ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep));
+
+ /* Set the max packet size and max burst */
++ max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
++ max_burst = 0;
+ switch (udev->speed) {
+ case USB_SPEED_SUPER:
+- max_packet = usb_endpoint_maxp(&ep->desc);
+- ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
+ /* dig out max burst from ep companion desc */
+- max_packet = ep->ss_ep_comp.bMaxBurst;
+- ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet));
++ max_burst = ep->ss_ep_comp.bMaxBurst;
+ break;
+ case USB_SPEED_HIGH:
++ /* Some devices get this wrong */
++ if (usb_endpoint_xfer_bulk(&ep->desc))
++ max_packet = 512;
+ /* bits 11:12 specify the number of additional transaction
+ * opportunities per microframe (USB 2.0, section 9.6.6)
+ */
+@@ -1369,17 +1371,16 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
+ usb_endpoint_xfer_int(&ep->desc)) {
+ max_burst = (usb_endpoint_maxp(&ep->desc)
+ & 0x1800) >> 11;
+- ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_burst));
+ }
+- /* Fall through */
++ break;
+ case USB_SPEED_FULL:
+ case USB_SPEED_LOW:
+- max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
+- ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
+ break;
+ default:
+ BUG();
+ }
++ ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet) |
++ MAX_BURST(max_burst));
+ max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
+ ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));
+
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 37b2a89..d08a804 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2376,14 +2376,21 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ * TD list.
+ */
+ if (list_empty(&ep_ring->td_list)) {
+- xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
+- "with no TDs queued?\n",
+- TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
+- ep_index);
+- xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
+- (le32_to_cpu(event->flags) &
+- TRB_TYPE_BITMASK)>>10);
+- xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
++ /*
++ * A stopped endpoint may generate an extra completion
++ * event if the device was suspended. Don't print
++ * warnings.
++ */
++ if (!(trb_comp_code == COMP_STOP ||
++ trb_comp_code == COMP_STOP_INVAL)) {
++ xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
++ TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
++ ep_index);
++ xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
++ (le32_to_cpu(event->flags) &
++ TRB_TYPE_BITMASK)>>10);
++ xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
++ }
+ if (ep->skip) {
+ ep->skip = false;
+ xhci_dbg(xhci, "td_list is empty while skip "
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 51d1712..918ec98 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -197,6 +197,8 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_BOOST_PID) },
+ { USB_DEVICE(NEWPORT_VID, NEWPORT_AGILIS_PID) },
++ { USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_CC_PID) },
++ { USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_AGP_PID) },
+ { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
+ { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 2f86008..5d25e26 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -772,6 +772,8 @@
+ */
+ #define NEWPORT_VID 0x104D
+ #define NEWPORT_AGILIS_PID 0x3000
++#define NEWPORT_CONEX_CC_PID 0x3002
++#define NEWPORT_CONEX_AGP_PID 0x3006
+
+ /* Interbiometrics USB I/O Board */
+ /* Developed for Interbiometrics by Rudolf Gugler */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 8513f51..59c4997 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -196,6 +196,7 @@ static void option_instat_callback(struct urb *urb);
+
+ #define DELL_PRODUCT_5800_MINICARD_VZW 0x8195 /* Novatel E362 */
+ #define DELL_PRODUCT_5800_V2_MINICARD_VZW 0x8196 /* Novatel E362 */
++#define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */
+
+ #define KYOCERA_VENDOR_ID 0x0c88
+ #define KYOCERA_PRODUCT_KPC650 0x17da
+@@ -341,8 +342,8 @@ static void option_instat_callback(struct urb *urb);
+ #define CINTERION_PRODUCT_EU3_E 0x0051
+ #define CINTERION_PRODUCT_EU3_P 0x0052
+ #define CINTERION_PRODUCT_PH8 0x0053
+-#define CINTERION_PRODUCT_AH6 0x0055
+-#define CINTERION_PRODUCT_PLS8 0x0060
++#define CINTERION_PRODUCT_AHXX 0x0055
++#define CINTERION_PRODUCT_PLXX 0x0060
+
+ /* Olivetti products */
+ #define OLIVETTI_VENDOR_ID 0x0b3c
+@@ -771,6 +772,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
+ { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) },
+ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
+ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
+ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
+@@ -966,6 +968,8 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0412, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G */
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
+@@ -1264,8 +1268,9 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) },
+- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AH6) },
+- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLS8) },
++ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) },
++ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
+diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
+index 450f529..2c69d12 100644
+--- a/fs/autofs4/expire.c
++++ b/fs/autofs4/expire.c
+@@ -61,15 +61,6 @@ static int autofs4_mount_busy(struct vfsmount *mnt, struct dentry *dentry)
+ /* This is an autofs submount, we can't expire it */
+ if (autofs_type_indirect(sbi->type))
+ goto done;
+-
+- /*
+- * Otherwise it's an offset mount and we need to check
+- * if we can umount its mount, if there is one.
+- */
+- if (!d_mountpoint(path.dentry)) {
+- status = 0;
+- goto done;
+- }
+ }
+
+ /* Update the expiry counter if fs is busy */
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index c04f02c..618ae6f 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -1571,7 +1571,11 @@ static noinline int copy_to_sk(struct btrfs_root *root,
+ item_off = btrfs_item_ptr_offset(leaf, i);
+ item_len = btrfs_item_size_nr(leaf, i);
+
+- if (item_len > BTRFS_SEARCH_ARGS_BUFSIZE)
++ btrfs_item_key_to_cpu(leaf, key, i);
++ if (!key_in_sk(key, sk))
++ continue;
++
++ if (sizeof(sh) + item_len > BTRFS_SEARCH_ARGS_BUFSIZE)
+ item_len = 0;
+
+ if (sizeof(sh) + item_len + *sk_offset >
+@@ -1580,10 +1584,6 @@ static noinline int copy_to_sk(struct btrfs_root *root,
+ goto overflow;
+ }
+
+- btrfs_item_key_to_cpu(leaf, key, i);
+- if (!key_in_sk(key, sk))
+- continue;
+-
+ sh.objectid = key->objectid;
+ sh.offset = key->offset;
+ sh.type = key->type;
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index e851d5b..20431b4 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -173,7 +173,8 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
+
+ if (fattr->cf_flags & CIFS_FATTR_DFS_REFERRAL)
+ inode->i_flags |= S_AUTOMOUNT;
+- cifs_set_ops(inode);
++ if (inode->i_state & I_NEW)
++ cifs_set_ops(inode);
+ }
+
+ void
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 7b18563..9243103 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2027,7 +2027,11 @@ repeat:
+ group = ac->ac_g_ex.fe_group;
+
+ for (i = 0; i < ngroups; group++, i++) {
+- if (group == ngroups)
++ /*
++ * Artificially restricted ngroups for non-extent
++ * files makes group > ngroups possible on first loop.
++ */
++ if (group >= ngroups)
+ group = 0;
+
+ /* This now checks without needing the buddy page */
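
The one-character change above (== to >=) matters because the starting group hint can already lie beyond an artificially reduced ngroups. A minimal stand-alone loop, with invented numbers rather than ext4 code, showing why the >= wrap test is the safe one:

/*
 * Illustrative sketch only: a circular scan whose start index may already
 * exceed the limit.  With "== ngroups" the index would never wrap and
 * would keep growing out of range.
 */
#include <stdio.h>

int main(void)
{
    int ngroups = 4;    /* artificially reduced limit       */
    int group = 6;      /* stale starting hint beyond limit */

    for (int i = 0; i < ngroups; group++, i++) {
        if (group >= ngroups)
            group = 0;  /* wrap back to the first group */
        printf("scanning group %d\n", group);
    }
    return 0;
}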
+diff --git a/fs/fat/inode.c b/fs/fat/inode.c
+index 808cac7..fc33ca1 100644
+--- a/fs/fat/inode.c
++++ b/fs/fat/inode.c
+@@ -1238,6 +1238,19 @@ static int fat_read_root(struct inode *inode)
+ return 0;
+ }
+
++static unsigned long calc_fat_clusters(struct super_block *sb)
++{
++ struct msdos_sb_info *sbi = MSDOS_SB(sb);
++
++ /* Divide first to avoid overflow */
++ if (sbi->fat_bits != 12) {
++ unsigned long ent_per_sec = sb->s_blocksize * 8 / sbi->fat_bits;
++ return ent_per_sec * sbi->fat_length;
++ }
++
++ return sbi->fat_length * sb->s_blocksize * 8 / sbi->fat_bits;
++}
++
+ /*
+ * Read the super block of an MS-DOS FS.
+ */
+@@ -1434,7 +1447,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
+ sbi->fat_bits = (total_clusters > MAX_FAT12) ? 16 : 12;
+
+ /* check that FAT table does not overflow */
+- fat_clusters = sbi->fat_length * sb->s_blocksize * 8 / sbi->fat_bits;
++ fat_clusters = calc_fat_clusters(sb);
+ total_clusters = min(total_clusters, fat_clusters - FAT_START_ENT);
+ if (total_clusters > MAX_FAT(sb)) {
+ if (!silent)
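
calc_fat_clusters() above divides before multiplying so the sectors-times-bits product cannot overflow in 32-bit arithmetic. A stand-alone illustration, with values invented purely to force the wraparound:

/*
 * Illustrative sketch only: why dividing first avoids 32-bit overflow in
 * the cluster-count calculation.  The numbers are made up.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t fat_length = 0x00100000;   /* sectors used by one FAT  */
    uint32_t blocksize = 4096;          /* bytes per logical sector */
    uint32_t fat_bits = 16;

    uint32_t naive = fat_length * blocksize * 8 / fat_bits;  /* product wraps to 0 */
    uint32_t safe = blocksize * 8 / fat_bits * fat_length;   /* 2048 entries/sector */

    printf("naive=%u safe=%u\n", naive, safe);  /* naive=0 safe=2147483648 */
    return 0;
}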
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 08921b8..e065497 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -267,6 +267,7 @@ static __be32
+ do_open_fhandle(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
+ {
+ __be32 status;
++ int accmode = 0;
+
+ /* Only reclaims from previously confirmed clients are valid */
+ if ((status = nfs4_check_open_reclaim(&open->op_clientid)))
+@@ -284,9 +285,19 @@ do_open_fhandle(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_
+
+ open->op_truncate = (open->op_iattr.ia_valid & ATTR_SIZE) &&
+ (open->op_iattr.ia_size == 0);
++ /*
++ * In the delegation case, the client is telling us about an
++ * open that it *already* performed locally, some time ago. We
++ * should let it succeed now if possible.
++ *
++ * In the case of a CLAIM_FH open, on the other hand, the client
++ * may be counting on us to enforce permissions (the Linux 4.1
++ * client uses this for normal opens, for example).
++ */
++ if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH)
++ accmode = NFSD_MAY_OWNER_OVERRIDE;
+
+- status = do_open_permission(rqstp, current_fh, open,
+- NFSD_MAY_OWNER_OVERRIDE);
++ status = do_open_permission(rqstp, current_fh, open, accmode);
+
+ return status;
+ }
+diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
+index b50ffb7..edeb239 100644
+--- a/fs/nilfs2/inode.c
++++ b/fs/nilfs2/inode.c
+@@ -195,13 +195,32 @@ static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
+
+ static int nilfs_set_page_dirty(struct page *page)
+ {
+- int ret = __set_page_dirty_buffers(page);
++ int ret = __set_page_dirty_nobuffers(page);
+
+- if (ret) {
++ if (page_has_buffers(page)) {
+ struct inode *inode = page->mapping->host;
+- unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);
++ unsigned nr_dirty = 0;
++ struct buffer_head *bh, *head;
+
+- nilfs_set_file_dirty(inode, nr_dirty);
++ /*
++ * This page is locked by callers, and no other thread
++ * concurrently marks its buffers dirty since they are
++ * only dirtied through routines in fs/buffer.c in
++ * which call sites of mark_buffer_dirty are protected
++ * by page lock.
++ */
++ bh = head = page_buffers(page);
++ do {
++ /* Do not mark hole blocks dirty */
++ if (buffer_dirty(bh) || !buffer_mapped(bh))
++ continue;
++
++ set_buffer_dirty(bh);
++ nr_dirty++;
++ } while (bh = bh->b_this_page, bh != head);
++
++ if (nr_dirty)
++ nilfs_set_file_dirty(inode, nr_dirty);
+ }
+ return ret;
+ }
+diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
+index 2f5b92e..7eb1c0c 100644
+--- a/fs/ocfs2/extent_map.c
++++ b/fs/ocfs2/extent_map.c
+@@ -791,7 +791,7 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ &hole_size, &rec, &is_last);
+ if (ret) {
+ mlog_errno(ret);
+- goto out;
++ goto out_unlock;
+ }
+
+ if (rec.e_blkno == 0ULL) {
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index 8469f3f..88c953d 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -204,6 +204,7 @@ typedef efi_status_t efi_query_capsule_caps_t(efi_capsule_header_t **capsules,
+ unsigned long count,
+ u64 *max_size,
+ int *reset_type);
++typedef efi_status_t efi_query_variable_store_t(u32 attributes, unsigned long size);
+
+ /*
+ * EFI Configuration Table and GUID definitions
+@@ -331,6 +332,14 @@ extern void efi_map_pal_code (void);
+ extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
+ extern void efi_gettimeofday (struct timespec *ts);
+ extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */
++#ifdef CONFIG_X86
++extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size);
++#else
++static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
++{
++ return EFI_SUCCESS;
++}
++#endif
+ extern u64 efi_get_iobase (void);
+ extern u32 efi_mem_type (unsigned long phys_addr);
+ extern u64 efi_mem_attributes (unsigned long phys_addr);
+@@ -475,7 +484,7 @@ struct efivar_operations {
+ efi_get_variable_t *get_variable;
+ efi_get_next_variable_t *get_next_variable;
+ efi_set_variable_t *set_variable;
+- efi_query_variable_info_t *query_variable_info;
++ efi_query_variable_store_t *query_variable_store;
+ };
+
+ struct efivars {
+diff --git a/include/linux/if_cablemodem.h b/include/linux/if_cablemodem.h
+index 9ca1007..ee6b3c4 100644
+--- a/include/linux/if_cablemodem.h
++++ b/include/linux/if_cablemodem.h
+@@ -12,11 +12,11 @@
+ */
+
+ /* some useful defines for sb1000.c e cmconfig.c - fv */
+-#define SIOCGCMSTATS SIOCDEVPRIVATE+0 /* get cable modem stats */
+-#define SIOCGCMFIRMWARE SIOCDEVPRIVATE+1 /* get cm firmware version */
+-#define SIOCGCMFREQUENCY SIOCDEVPRIVATE+2 /* get cable modem frequency */
+-#define SIOCSCMFREQUENCY SIOCDEVPRIVATE+3 /* set cable modem frequency */
+-#define SIOCGCMPIDS SIOCDEVPRIVATE+4 /* get cable modem PIDs */
+-#define SIOCSCMPIDS SIOCDEVPRIVATE+5 /* set cable modem PIDs */
++#define SIOCGCMSTATS (SIOCDEVPRIVATE+0) /* get cable modem stats */
++#define SIOCGCMFIRMWARE (SIOCDEVPRIVATE+1) /* get cm firmware version */
++#define SIOCGCMFREQUENCY (SIOCDEVPRIVATE+2) /* get cable modem frequency */
++#define SIOCSCMFREQUENCY (SIOCDEVPRIVATE+3) /* set cable modem frequency */
++#define SIOCGCMPIDS (SIOCDEVPRIVATE+4) /* get cable modem PIDs */
++#define SIOCSCMPIDS (SIOCDEVPRIVATE+5) /* set cable modem PIDs */
+
+ #endif
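
The if_cablemodem.h hunk above only adds parentheses, but that guards the constants against operator-precedence surprises at every use site. A small stand-alone demonstration (BASE is a stand-in literal, not the real SIOCDEVPRIVATE definition):

/*
 * Illustrative sketch only: what an unparenthesized "BASE+n" macro can do
 * to an innocent-looking expression.
 */
#include <stdio.h>

#define BASE        0x89F0
#define CMD_UNSAFE  BASE+2      /* old style: expands bare      */
#define CMD_SAFE    (BASE+2)    /* new style: expands as a unit */

int main(void)
{
    /* The multiplication binds to the "+2", not to the whole constant. */
    printf("unsafe: %#x\n", CMD_UNSAFE * 2);    /* 0x89f0 + (2*2) = 0x89f4 */
    printf("safe:   %#x\n", CMD_SAFE * 2);      /* (0x89f0+2)*2 = 0x113e4  */
    return 0;
}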
+diff --git a/include/linux/rculist.h b/include/linux/rculist.h
+index d079290..6f95e24 100644
+--- a/include/linux/rculist.h
++++ b/include/linux/rculist.h
+@@ -242,6 +242,23 @@ static inline void list_splice_init_rcu(struct list_head *list,
+ list_entry_rcu((ptr)->next, type, member)
+
+ /**
++ * list_first_or_null_rcu - get the first element from a list
++ * @ptr: the list head to take the element from.
++ * @type: the type of the struct this is embedded in.
++ * @member: the name of the list_struct within the struct.
++ *
++ * Note that if the list is empty, it returns NULL.
++ *
++ * This primitive may safely run concurrently with the _rcu list-mutation
++ * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
++ */
++#define list_first_or_null_rcu(ptr, type, member) \
++ ({struct list_head *__ptr = (ptr); \
++ struct list_head __rcu *__next = list_next_rcu(__ptr); \
++ likely(__ptr != __next) ? container_of(__next, type, member) : NULL; \
++ })
++
++/**
+ * list_for_each_entry_rcu - iterate over rcu list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
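
list_first_or_null_rcu(), added above, exists so readers can take the head element of an RCU-protected list without tripping over an empty list. The pattern itself, stripped of RCU and reduced to a plain circular list with a sentinel head so it runs anywhere (the struct here is invented, not kernel code):

/*
 * Illustrative sketch only: the "first entry or NULL" idea on a plain
 * circular list.  An empty list is one whose head points at itself.
 */
#include <stddef.h>
#include <stdio.h>

struct node {
    struct node *next;
    int val;
};

static struct node *first_or_null(struct node *head)
{
    return head->next != head ? head->next : NULL;
}

int main(void)
{
    struct node head = { &head, 0 };    /* empty: points at itself */
    struct node a = { &head, 42 };

    printf("%s\n", first_or_null(&head) ? "non-empty" : "empty");  /* empty */
    head.next = &a;
    printf("first val = %d\n", first_or_null(&head)->val);         /* 42 */
    return 0;
}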
+diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h
+index bdf4b00..82e12ad 100644
+--- a/include/linux/virtio_console.h
++++ b/include/linux/virtio_console.h
+@@ -39,7 +39,7 @@
+ #define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */
+ #define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */
+
+-#define VIRTIO_CONSOLE_BAD_ID (~(u32)0)
++#define VIRTIO_CONSOLE_BAD_ID (~(__u32)0)
+
+ struct virtio_console_config {
+ 	/* columns of the screens */
+diff --git a/include/linux/wait.h b/include/linux/wait.h
+index 3efc9f3..bea7ad5 100644
+--- a/include/linux/wait.h
++++ b/include/linux/wait.h
+@@ -233,6 +233,8 @@ do { \
+ if (!ret) \
+ break; \
+ } \
++ if (!ret && (condition)) \
++ ret = 1; \
+ finish_wait(&wq, &__wait); \
+ } while (0)
+
+@@ -249,8 +251,9 @@ do { \
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+- * The function returns 0 if the @timeout elapsed, and the remaining
+- * jiffies if the condition evaluated to true before the timeout elapsed.
++ * The function returns 0 if the @timeout elapsed, or the remaining
++ * jiffies (at least 1) if the @condition evaluated to %true before
++ * the @timeout elapsed.
+ */
+ #define wait_event_timeout(wq, condition, timeout) \
+ ({ \
+@@ -318,6 +321,8 @@ do { \
+ ret = -ERESTARTSYS; \
+ break; \
+ } \
++ if (!ret && (condition)) \
++ ret = 1; \
+ finish_wait(&wq, &__wait); \
+ } while (0)
+
+@@ -334,9 +339,10 @@ do { \
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+- * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
+- * was interrupted by a signal, and the remaining jiffies otherwise
+- * if the condition evaluated to true before the timeout elapsed.
++ * Returns:
++ * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by
++ * a signal, or the remaining jiffies (at least 1) if the @condition
++ * evaluated to %true before the @timeout elapsed.
+ */
+ #define wait_event_interruptible_timeout(wq, condition, timeout) \
+ ({ \
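
The wait.h hunks above change the documented contract: a non-zero return now always means the condition became true, with at least 1 jiffy reported even if it turned true exactly as the timeout expired. A userspace caricature of that convention (a trivial polling loop, nothing like the real macro internals):

/*
 * Illustrative sketch only: the "return at least 1 when the condition is
 * true" convention, modelled as a busy poll with a tick budget.
 */
#include <stdio.h>

static int wait_for(int (*cond)(void), int budget)
{
    while (budget > 0 && !cond())
        budget--;                       /* one "tick" per failed check    */
    if (cond())
        return budget ? budget : 1;     /* success is never reported as 0 */
    return 0;                           /* 0 always means "timed out"     */
}

static int yes(void) { return 1; }
static int no(void)  { return 0; }

int main(void)
{
    printf("%d\n", wait_for(yes, 0));   /* 1: true right at the deadline */
    printf("%d\n", wait_for(yes, 7));   /* 7: true immediately           */
    printf("%d\n", wait_for(no, 7));    /* 0: timed out                  */
    return 0;
}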
+diff --git a/include/net/sock.h b/include/net/sock.h
+index ddf523c..e6454b6 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -734,6 +734,18 @@ struct inet_hashinfo;
+ struct raw_hashinfo;
+ struct module;
+
++/*
++ * Caches using SLAB_DESTROY_BY_RCU should leave the .next pointer of nulls
++ * nodes unmodified. Special care is taken when initializing the object to zero.
++ */
++static inline void sk_prot_clear_nulls(struct sock *sk, int size)
++{
++ if (offsetof(struct sock, sk_node.next) != 0)
++ memset(sk, 0, offsetof(struct sock, sk_node.next));
++ memset(&sk->sk_node.pprev, 0,
++ size - offsetof(struct sock, sk_node.pprev));
++}
++
+ /* Networking protocol blocks we attach to sockets.
+ * socket layer -> transport layer interface
+ * transport -> network interface is defined by struct inet_proto
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 0768715..fe46019 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -931,6 +931,7 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
+ if (sysctl_tcp_low_latency || !tp->ucopy.task)
+ return 0;
+
++ skb_dst_force(skb);
+ __skb_queue_tail(&tp->ucopy.prequeue, skb);
+ tp->ucopy.memory += skb->truesize;
+ if (tp->ucopy.memory > sk->sk_rcvbuf) {
+diff --git a/kernel/kmod.c b/kernel/kmod.c
+index d6fe08a..a16dac1 100644
+--- a/kernel/kmod.c
++++ b/kernel/kmod.c
+@@ -467,6 +467,11 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info,
+ int retval = 0;
+
+ helper_lock();
++ if (!sub_info->path) {
++ retval = -EINVAL;
++ goto out;
++ }
++
+ if (sub_info->path[0] == '\0')
+ goto out;
+
+diff --git a/kernel/sched.c b/kernel/sched.c
+index d08c9f4..d93369a 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -6672,16 +6672,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
+ *tablep = NULL;
+ }
+
++static int min_load_idx = 0;
++static int max_load_idx = CPU_LOAD_IDX_MAX-1;
++
+ static void
+ set_table_entry(struct ctl_table *entry,
+ const char *procname, void *data, int maxlen,
+- mode_t mode, proc_handler *proc_handler)
++ mode_t mode, proc_handler *proc_handler,
++ bool load_idx)
+ {
+ entry->procname = procname;
+ entry->data = data;
+ entry->maxlen = maxlen;
+ entry->mode = mode;
+ entry->proc_handler = proc_handler;
++
++ if (load_idx) {
++ entry->extra1 = &min_load_idx;
++ entry->extra2 = &max_load_idx;
++ }
+ }
+
+ static struct ctl_table *
+@@ -6693,30 +6702,30 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
+ return NULL;
+
+ set_table_entry(&table[0], "min_interval", &sd->min_interval,
+- sizeof(long), 0644, proc_doulongvec_minmax);
++ sizeof(long), 0644, proc_doulongvec_minmax, false);
+ set_table_entry(&table[1], "max_interval", &sd->max_interval,
+- sizeof(long), 0644, proc_doulongvec_minmax);
++ sizeof(long), 0644, proc_doulongvec_minmax, false);
+ set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, true);
+ set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, true);
+ set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, true);
+ set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, true);
+ set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, true);
+ set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, false);
+ set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, false);
+ set_table_entry(&table[9], "cache_nice_tries",
+ &sd->cache_nice_tries,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, false);
+ set_table_entry(&table[10], "flags", &sd->flags,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, false);
+ set_table_entry(&table[11], "name", sd->name,
+- CORENAME_MAX_SIZE, 0444, proc_dostring);
++ CORENAME_MAX_SIZE, 0444, proc_dostring, false);
+ /* &table[12] is terminator */
+
+ return table;
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 793548c..e9a45f1 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -833,7 +833,7 @@ void tick_cancel_sched_timer(int cpu)
+ hrtimer_cancel(&ts->sched_timer);
+ # endif
+
+- ts->nohz_mode = NOHZ_MODE_INACTIVE;
++ memset(ts, 0, sizeof(*ts));
+ }
+ #endif
+
+diff --git a/kernel/timer.c b/kernel/timer.c
+index c219db6..f2f71d7 100644
+--- a/kernel/timer.c
++++ b/kernel/timer.c
+@@ -1630,12 +1630,12 @@ static int __cpuinit init_timers_cpu(int cpu)
+ boot_done = 1;
+ base = &boot_tvec_bases;
+ }
++ spin_lock_init(&base->lock);
+ tvec_base_done[cpu] = 1;
+ } else {
+ base = per_cpu(tvec_bases, cpu);
+ }
+
+- spin_lock_init(&base->lock);
+
+ for (j = 0; j < TVN_SIZE; j++) {
+ INIT_LIST_HEAD(base->tv5.vec + j);
+diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
+index 95dc31e..b0996c1 100644
+--- a/kernel/trace/trace_events_filter.c
++++ b/kernel/trace/trace_events_filter.c
+@@ -769,7 +769,11 @@ static int filter_set_pred(struct event_filter *filter,
+
+ static void __free_preds(struct event_filter *filter)
+ {
++ int i;
++
+ if (filter->preds) {
++ for (i = 0; i < filter->n_preds; i++)
++ kfree(filter->preds[i].ops);
+ kfree(filter->preds);
+ filter->preds = NULL;
+ }
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 470cbb4..d80ac4b 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1937,7 +1937,12 @@ static void collapse_huge_page(struct mm_struct *mm,
+ pte_unmap(pte);
+ spin_lock(&mm->page_table_lock);
+ BUG_ON(!pmd_none(*pmd));
+- set_pmd_at(mm, address, pmd, _pmd);
++ /*
++ * We can only use set_pmd_at when establishing
++ * hugepmds and never for establishing regular pmds that
++ * points to regular pagetables. Use pmd_populate for that
++ */
++ pmd_populate(mm, pmd, pmd_pgtable(_pmd));
+ spin_unlock(&mm->page_table_lock);
+ anon_vma_unlock(vma->anon_vma);
+ goto out;
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 180d97f..e1052d1 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -147,7 +147,7 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
+ if (PageHuge(new))
+ pte = pte_mkhuge(pte);
+ #endif
+- flush_cache_page(vma, addr, pte_pfn(pte));
++ flush_dcache_page(new);
+ set_pte_at(mm, addr, ptep, pte);
+
+ if (PageHuge(new)) {
+diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
+index 8d1ca2d..a160ec8 100644
+--- a/mm/mmu_notifier.c
++++ b/mm/mmu_notifier.c
+@@ -37,51 +37,48 @@ static struct srcu_struct srcu;
+ void __mmu_notifier_release(struct mm_struct *mm)
+ {
+ struct mmu_notifier *mn;
++ struct hlist_node *n;
+ int id;
+
+ /*
+- * srcu_read_lock() here will block synchronize_srcu() in
+- * mmu_notifier_unregister() until all registered
+- * ->release() callouts this function makes have
+- * returned.
++ * SRCU here will block mmu_notifier_unregister until
++ * ->release returns.
+ */
+ id = srcu_read_lock(&srcu);
++ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist)
++ /*
++ * If ->release runs before mmu_notifier_unregister it must be
++ * handled, as it's the only way for the driver to flush all
++ * existing sptes and stop the driver from establishing any more
++ * sptes before all the pages in the mm are freed.
++ */
++ if (mn->ops->release)
++ mn->ops->release(mn, mm);
++ srcu_read_unlock(&srcu, id);
++
+ spin_lock(&mm->mmu_notifier_mm->lock);
+ while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
+ mn = hlist_entry(mm->mmu_notifier_mm->list.first,
+ struct mmu_notifier,
+ hlist);
+-
+ /*
+- * Unlink. This will prevent mmu_notifier_unregister()
+- * from also making the ->release() callout.
++ * We arrived before mmu_notifier_unregister so
++ * mmu_notifier_unregister will do nothing other than to wait
++ * for ->release to finish and for mmu_notifier_unregister to
++ * return.
+ */
+ hlist_del_init_rcu(&mn->hlist);
+- spin_unlock(&mm->mmu_notifier_mm->lock);
+-
+- /*
+- * Clear sptes. (see 'release' description in mmu_notifier.h)
+- */
+- if (mn->ops->release)
+- mn->ops->release(mn, mm);
+-
+- spin_lock(&mm->mmu_notifier_mm->lock);
+ }
+ spin_unlock(&mm->mmu_notifier_mm->lock);
+
+ /*
+- * All callouts to ->release() which we have done are complete.
+- * Allow synchronize_srcu() in mmu_notifier_unregister() to complete
+- */
+- srcu_read_unlock(&srcu, id);
+-
+- /*
+- * mmu_notifier_unregister() may have unlinked a notifier and may
+- * still be calling out to it. Additionally, other notifiers
+- * may have been active via vmtruncate() et. al. Block here
+- * to ensure that all notifier callouts for this mm have been
+- * completed and the sptes are really cleaned up before returning
+- * to exit_mmap().
++ * synchronize_srcu here prevents mmu_notifier_release from returning to
++ * exit_mmap (which would proceed with freeing all pages in the mm)
++ * until the ->release method returns, if it was invoked by
++ * mmu_notifier_unregister.
++ *
++ * The mmu_notifier_mm can't go away from under us because one mm_count
++ * is held by exit_mmap.
+ */
+ synchronize_srcu(&srcu);
+ }
+@@ -302,31 +299,34 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
+ {
+ BUG_ON(atomic_read(&mm->mm_count) <= 0);
+
+- spin_lock(&mm->mmu_notifier_mm->lock);
+ if (!hlist_unhashed(&mn->hlist)) {
++ /*
++ * SRCU here will force exit_mmap to wait for ->release to
++ * finish before freeing the pages.
++ */
+ int id;
+
++ id = srcu_read_lock(&srcu);
+ /*
+- * Ensure we synchronize up with __mmu_notifier_release().
++ * exit_mmap will block in mmu_notifier_release to guarantee
++ * that ->release is called before freeing the pages.
+ */
+- id = srcu_read_lock(&srcu);
+-
+- hlist_del_rcu(&mn->hlist);
+- spin_unlock(&mm->mmu_notifier_mm->lock);
+-
+ if (mn->ops->release)
+ mn->ops->release(mn, mm);
++ srcu_read_unlock(&srcu, id);
+
++ spin_lock(&mm->mmu_notifier_mm->lock);
+ /*
+- * Allow __mmu_notifier_release() to complete.
++ * Can not use list_del_rcu() since __mmu_notifier_release
++ * can delete it before we hold the lock.
+ */
+- srcu_read_unlock(&srcu, id);
+- } else
++ hlist_del_init_rcu(&mn->hlist);
+ spin_unlock(&mm->mmu_notifier_mm->lock);
++ }
+
+ /*
+- * Wait for any running method to finish, including ->release() if it
+- * was run by __mmu_notifier_release() instead of us.
++ * Wait for any running method to finish, of course including
++	 * ->release if it was run by mmu_notifier_release instead of us.
+ */
+ synchronize_srcu(&srcu);
+
+diff --git a/mm/pagewalk.c b/mm/pagewalk.c
+index aa9701e..1090e77 100644
+--- a/mm/pagewalk.c
++++ b/mm/pagewalk.c
+@@ -127,28 +127,7 @@ static int walk_hugetlb_range(struct vm_area_struct *vma,
+ return 0;
+ }
+
+-static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
+-{
+- struct vm_area_struct *vma;
+-
+- /* We don't need vma lookup at all. */
+- if (!walk->hugetlb_entry)
+- return NULL;
+-
+- VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
+- vma = find_vma(walk->mm, addr);
+- if (vma && vma->vm_start <= addr && is_vm_hugetlb_page(vma))
+- return vma;
+-
+- return NULL;
+-}
+-
+ #else /* CONFIG_HUGETLB_PAGE */
+-static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
+-{
+- return NULL;
+-}
+-
+ static int walk_hugetlb_range(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end,
+ struct mm_walk *walk)
+@@ -199,30 +178,53 @@ int walk_page_range(unsigned long addr, unsigned long end,
+ if (!walk->mm)
+ return -EINVAL;
+
++ VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
++
+ pgd = pgd_offset(walk->mm, addr);
+ do {
+- struct vm_area_struct *vma;
++ struct vm_area_struct *vma = NULL;
+
+ next = pgd_addr_end(addr, end);
+
+ /*
+- * handle hugetlb vma individually because pagetable walk for
+- * the hugetlb page is dependent on the architecture and
+- * we can't handled it in the same manner as non-huge pages.
++ * This function was not intended to be vma based.
++ * But there are vma special cases to be handled:
++ * - hugetlb vma's
++ * - VM_PFNMAP vma's
+ */
+- vma = hugetlb_vma(addr, walk);
++ vma = find_vma(walk->mm, addr);
+ if (vma) {
+- if (vma->vm_end < next)
++ /*
++ * There are no page structures backing a VM_PFNMAP
++ * range, so do not allow split_huge_page_pmd().
++ */
++ if ((vma->vm_start <= addr) &&
++ (vma->vm_flags & VM_PFNMAP)) {
+ next = vma->vm_end;
++ pgd = pgd_offset(walk->mm, next);
++ continue;
++ }
+ /*
+- * Hugepage is very tightly coupled with vma, so
+- * walk through hugetlb entries within a given vma.
++ * Handle hugetlb vma individually because pagetable
++ * walk for the hugetlb page is dependent on the
++			 * architecture and we can't handle it in the same
++ * manner as non-huge pages.
+ */
+- err = walk_hugetlb_range(vma, addr, next, walk);
+- if (err)
+- break;
+- pgd = pgd_offset(walk->mm, next);
+- continue;
++ if (walk->hugetlb_entry && (vma->vm_start <= addr) &&
++ is_vm_hugetlb_page(vma)) {
++ if (vma->vm_end < next)
++ next = vma->vm_end;
++ /*
++ * Hugepage is very tightly coupled with vma,
++ * so walk through hugetlb entries within a
++ * given vma.
++ */
++ err = walk_hugetlb_range(vma, addr, next, walk);
++ if (err)
++ break;
++ pgd = pgd_offset(walk->mm, next);
++ continue;
++ }
+ }
+
+ if (pgd_none_or_clear_bad(pgd)) {
+diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
+index 58de2a0..c83ee79 100644
+--- a/net/bridge/br_stp_timer.c
++++ b/net/bridge/br_stp_timer.c
+@@ -107,7 +107,7 @@ static void br_tcn_timer_expired(unsigned long arg)
+
+ br_debug(br, "tcn timer expired\n");
+ spin_lock(&br->lock);
+- if (br->dev->flags & IFF_UP) {
++ if (!br_is_root_bridge(br) && (br->dev->flags & IFF_UP)) {
+ br_transmit_tcn(br);
+
+ mod_timer(&br->tcn_timer,jiffies + br->bridge_hello_time);
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 2c73adf..8a2c2dd 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1021,18 +1021,6 @@ static void sock_copy(struct sock *nsk, const struct sock *osk)
+ #endif
+ }
+
+-/*
+- * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes
+- * un-modified. Special care is taken when initializing object to zero.
+- */
+-static inline void sk_prot_clear_nulls(struct sock *sk, int size)
+-{
+- if (offsetof(struct sock, sk_node.next) != 0)
+- memset(sk, 0, offsetof(struct sock, sk_node.next));
+- memset(&sk->sk_node.pprev, 0,
+- size - offsetof(struct sock, sk_node.pprev));
+-}
+-
+ void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
+ {
+ unsigned long nulls1, nulls2;
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index db10805..c69358c 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -2196,6 +2196,17 @@ void tcp6_proc_exit(struct net *net)
+ }
+ #endif
+
++static void tcp_v6_clear_sk(struct sock *sk, int size)
++{
++ struct inet_sock *inet = inet_sk(sk);
++
++ /* we do not want to clear pinet6 field, because of RCU lookups */
++ sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
++
++ size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
++ memset(&inet->pinet6 + 1, 0, size);
++}
++
+ struct proto tcpv6_prot = {
+ .name = "TCPv6",
+ .owner = THIS_MODULE,
+@@ -2235,6 +2246,7 @@ struct proto tcpv6_prot = {
+ .compat_setsockopt = compat_tcp_setsockopt,
+ .compat_getsockopt = compat_tcp_getsockopt,
+ #endif
++ .clear_sk = tcp_v6_clear_sk,
+ };
+
+ static const struct inet6_protocol tcpv6_protocol = {
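
tcp_v6_clear_sk() above (and udp_v6_clear_sk() further down) zero a recycled socket while skipping the pinet6 pointer that concurrent RCU lookups may still dereference. The underlying offsetof() trick, reduced to a plain struct as a sketch (struct and field names are invented for the example):

/*
 * Illustrative sketch only: clear an object except for one field that
 * concurrent readers rely on, by splitting the memset in two around it.
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct obj {
    int a;
    void *keep;     /* must survive the clear, like pinet6 */
    int b;
    int c;
};

static void clear_but_keep(struct obj *o)
{
    memset(o, 0, offsetof(struct obj, keep));
    memset((char *)o + offsetof(struct obj, keep) + sizeof(o->keep), 0,
           sizeof(*o) - offsetof(struct obj, keep) - sizeof(o->keep));
}

int main(void)
{
    struct obj o = { .a = 1, .keep = (void *)&o, .b = 2, .c = 3 };

    clear_but_keep(&o);
    printf("a=%d keep=%s b=%d c=%d\n",
           o.a, o.keep ? "preserved" : "lost", o.b, o.c);   /* a=0 keep=preserved b=0 c=0 */
    return 0;
}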
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 8c25419..20f0812 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1453,6 +1453,17 @@ void udp6_proc_exit(struct net *net) {
+ }
+ #endif /* CONFIG_PROC_FS */
+
++void udp_v6_clear_sk(struct sock *sk, int size)
++{
++ struct inet_sock *inet = inet_sk(sk);
++
++ /* we do not want to clear pinet6 field, because of RCU lookups */
++ sk_prot_clear_portaddr_nulls(sk, offsetof(struct inet_sock, pinet6));
++
++ size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
++ memset(&inet->pinet6 + 1, 0, size);
++}
++
+ /* ------------------------------------------------------------------------ */
+
+ struct proto udpv6_prot = {
+@@ -1483,7 +1494,7 @@ struct proto udpv6_prot = {
+ .compat_setsockopt = compat_udpv6_setsockopt,
+ .compat_getsockopt = compat_udpv6_getsockopt,
+ #endif
+- .clear_sk = sk_prot_clear_portaddr_nulls,
++ .clear_sk = udp_v6_clear_sk,
+ };
+
+ static struct inet_protosw udpv6_protosw = {
+diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
+index d757104..4691ed5 100644
+--- a/net/ipv6/udp_impl.h
++++ b/net/ipv6/udp_impl.h
+@@ -31,6 +31,8 @@ extern int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
+ extern int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb);
+ extern void udpv6_destroy_sock(struct sock *sk);
+
++extern void udp_v6_clear_sk(struct sock *sk, int size);
++
+ #ifdef CONFIG_PROC_FS
+ extern int udp6_seq_show(struct seq_file *seq, void *v);
+ #endif
+diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
+index 1d08e21..dfcc4be 100644
+--- a/net/ipv6/udplite.c
++++ b/net/ipv6/udplite.c
+@@ -56,7 +56,7 @@ struct proto udplitev6_prot = {
+ .compat_setsockopt = compat_udpv6_setsockopt,
+ .compat_getsockopt = compat_udpv6_getsockopt,
+ #endif
+- .clear_sk = sk_prot_clear_portaddr_nulls,
++ .clear_sk = udp_v6_clear_sk,
+ };
+
+ static struct inet_protosw udplite6_protosw = {
+diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
+index d879f7e..db78e7d 100644
+--- a/net/ipv6/xfrm6_policy.c
++++ b/net/ipv6/xfrm6_policy.c
+@@ -96,8 +96,10 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
+ dev_hold(dev);
+
+ xdst->u.rt6.rt6i_idev = in6_dev_get(dev);
+- if (!xdst->u.rt6.rt6i_idev)
++ if (!xdst->u.rt6.rt6i_idev) {
++ dev_put(dev);
+ return -ENODEV;
++ }
+
+ xdst->u.rt6.rt6i_peer = rt->rt6i_peer;
+ if (rt->rt6i_peer)
+diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
+index 13d607a..87ecf75 100644
+--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
++++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
+@@ -37,14 +37,10 @@ static int get_callid(const char *dptr, unsigned int dataoff,
+ if (ret > 0)
+ break;
+ if (!ret)
+- return 0;
++ return -EINVAL;
+ dataoff += *matchoff;
+ }
+
+- /* Empty callid is useless */
+- if (!*matchlen)
+- return -EINVAL;
+-
+ /* Too large is useless */
+ if (*matchlen > IP_VS_PEDATA_MAXLEN)
+ return -EINVAL;
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 835fcea..5a70215 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -813,37 +813,27 @@ static void prb_open_block(struct tpacket_kbdq_core *pkc1,
+
+ smp_rmb();
+
+- if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd1))) {
+-
+- /* We could have just memset this but we will lose the
+- * flexibility of making the priv area sticky
+- */
+- BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
+- BLOCK_NUM_PKTS(pbd1) = 0;
+- BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
+- getnstimeofday(&ts);
+- h1->ts_first_pkt.ts_sec = ts.tv_sec;
+- h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
+- pkc1->pkblk_start = (char *)pbd1;
+- pkc1->nxt_offset = (char *)(pkc1->pkblk_start +
+- BLK_PLUS_PRIV(pkc1->blk_sizeof_priv));
+- BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
+- BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
+- pbd1->version = pkc1->version;
+- pkc1->prev = pkc1->nxt_offset;
+- pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
+- prb_thaw_queue(pkc1);
+- _prb_refresh_rx_retire_blk_timer(pkc1);
+-
+- smp_wmb();
+-
+- return;
+- }
++ /* We could have just memset this but we will lose the
++ * flexibility of making the priv area sticky
++ */
++ BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
++ BLOCK_NUM_PKTS(pbd1) = 0;
++ BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
++ getnstimeofday(&ts);
++ h1->ts_first_pkt.ts_sec = ts.tv_sec;
++ h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
++ pkc1->pkblk_start = (char *)pbd1;
++ pkc1->nxt_offset = (char *)(pkc1->pkblk_start +
++ BLK_PLUS_PRIV(pkc1->blk_sizeof_priv));
++ BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
++ BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
++ pbd1->version = pkc1->version;
++ pkc1->prev = pkc1->nxt_offset;
++ pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
++ prb_thaw_queue(pkc1);
++ _prb_refresh_rx_retire_blk_timer(pkc1);
+
+- WARN(1, "ERROR block:%p is NOT FREE status:%d kactive_blk_num:%d\n",
+- pbd1, BLOCK_STATUS(pbd1), pkc1->kactive_blk_num);
+- dump_stack();
+- BUG();
++ smp_wmb();
+ }
+
+ /*
+@@ -934,10 +924,6 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
+ prb_close_block(pkc, pbd, po, status);
+ return;
+ }
+-
+- WARN(1, "ERROR-pbd[%d]:%p\n", pkc->kactive_blk_num, pbd);
+- dump_stack();
+- BUG();
+ }
+
+ static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
+diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
+index 60f8f61..57827bf 100644
+--- a/net/sched/act_ipt.c
++++ b/net/sched/act_ipt.c
+@@ -8,7 +8,7 @@
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+- * Copyright: Jamal Hadi Salim (2002-4)
++ * Copyright: Jamal Hadi Salim (2002-13)
+ */
+
+ #include <linux/types.h>
+@@ -299,17 +299,44 @@ static struct tc_action_ops act_ipt_ops = {
+ .walk = tcf_generic_walker
+ };
+
+-MODULE_AUTHOR("Jamal Hadi Salim(2002-4)");
++static struct tc_action_ops act_xt_ops = {
++ .kind = "xt",
++ .hinfo = &ipt_hash_info,
++ .type = TCA_ACT_IPT,
++ .capab = TCA_CAP_NONE,
++ .owner = THIS_MODULE,
++ .act = tcf_ipt,
++ .dump = tcf_ipt_dump,
++ .cleanup = tcf_ipt_cleanup,
++ .lookup = tcf_hash_search,
++ .init = tcf_ipt_init,
++ .walk = tcf_generic_walker
++};
++
++MODULE_AUTHOR("Jamal Hadi Salim(2002-13)");
+ MODULE_DESCRIPTION("Iptables target actions");
+ MODULE_LICENSE("GPL");
++MODULE_ALIAS("act_xt");
+
+ static int __init ipt_init_module(void)
+ {
+- return tcf_register_action(&act_ipt_ops);
++ int ret1, ret2;
++ ret1 = tcf_register_action(&act_xt_ops);
++ if (ret1 < 0)
++ printk("Failed to load xt action\n");
++ ret2 = tcf_register_action(&act_ipt_ops);
++ if (ret2 < 0)
++ printk("Failed to load ipt action\n");
++
++ if (ret1 < 0 && ret2 < 0)
++ return ret1;
++ else
++ return 0;
+ }
+
+ static void __exit ipt_cleanup_module(void)
+ {
++ tcf_unregister_action(&act_xt_ops);
+ tcf_unregister_action(&act_ipt_ops);
+ }
+
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index dc6af27..206c61e 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -296,13 +296,20 @@ EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
+ /*
+ * Make an RPC task runnable.
+ *
+- * Note: If the task is ASYNC, this must be called with
+- * the spinlock held to protect the wait queue operation.
++ * Note: If the task is ASYNC, and is being made runnable after sitting on an
++ * rpc_wait_queue, this must be called with the queue spinlock held to protect
++ * the wait queue operation.
++ * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
++ * which is needed to ensure that __rpc_execute() doesn't loop (due to the
++ * lockless RPC_IS_QUEUED() test) before we've had a chance to test
++ * the RPC_TASK_RUNNING flag.
+ */
+ static void rpc_make_runnable(struct rpc_task *task)
+ {
++ bool need_wakeup = !rpc_test_and_set_running(task);
++
+ rpc_clear_queued(task);
+- if (rpc_test_and_set_running(task))
++ if (!need_wakeup)
+ return;
+ if (RPC_IS_ASYNC(task)) {
+ INIT_WORK(&task->u.tk_work, rpc_async_schedule);
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 4707b6c..faabaa5 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -615,6 +615,9 @@ int snd_hda_queue_unsol_event(struct hda_bus *bus, u32 res, u32 res_ex)
+ struct hda_bus_unsolicited *unsol;
+ unsigned int wp;
+
++ if (!bus || !bus->workq)
++ return 0;
++
+ trace_hda_unsol_event(bus, res, res_ex);
+ unsol = bus->unsol;
+ if (!unsol)
+diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
+index 98c5774..b73f226 100644
+--- a/sound/soc/codecs/wm8994.c
++++ b/sound/soc/codecs/wm8994.c
+@@ -2636,6 +2636,7 @@ static int wm8994_aif3_hw_params(struct snd_pcm_substream *substream,
+ default:
+ return 0;
+ }
++ break;
+ default:
+ return 0;
+ }
+diff --git a/tools/perf/scripts/python/net_dropmonitor.py b/tools/perf/scripts/python/net_dropmonitor.py
+index a4ffc95..4c11605 100755
+--- a/tools/perf/scripts/python/net_dropmonitor.py
++++ b/tools/perf/scripts/python/net_dropmonitor.py
+@@ -40,9 +40,9 @@ def get_kallsyms_table():
+
+ def get_sym(sloc):
+ loc = int(sloc)
+- for i in kallsyms:
+- if (i['loc'] >= loc):
+- return (i['name'], i['loc']-loc)
++ for i in kallsyms[::-1]:
++ if loc >= i['loc']:
++ return (i['name'], loc - i['loc'])
+ return (None, 0)
+
+ def print_drop_table():
+@@ -64,7 +64,7 @@ def trace_end():
+
+ # called from perf, when it finds a corresponding event
+ def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
+- skbaddr, protocol, location):
++ skbaddr, location, protocol):
+ slocation = str(location)
+ try:
+ drop_log[slocation] = drop_log[slocation] + 1
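
The net_dropmonitor.py hunk above fixes the symbol lookup to walk the address-sorted table from the end and pick the last entry at or below the target address. The same lookup written as stand-alone C for consistency with the rest of the examples, over a fabricated three-entry table:

/*
 * Illustrative sketch only: "last symbol whose address is <= loc" over an
 * address-sorted table, mirroring the corrected Python lookup above.
 */
#include <stdio.h>

struct sym {
    unsigned long loc;
    const char *name;
};

static const struct sym *find_sym(const struct sym *tab, int n, unsigned long loc)
{
    for (int i = n - 1; i >= 0; i--)    /* walk backwards, like kallsyms[::-1] */
        if (loc >= tab[i].loc)
            return &tab[i];
    return NULL;                        /* below the first symbol */
}

int main(void)
{
    static const struct sym tab[] = {
        { 0x1000, "sym_a" },
        { 0x2000, "sym_b" },
        { 0x3000, "sym_c" },
    };
    const struct sym *s = find_sym(tab, 3, 0x2100);

    if (s)
        printf("%s+0x%lx\n", s->name, 0x2100UL - s->loc);   /* sym_b+0x100 */
    return 0;
}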
diff --git a/3.2.45/4420_grsecurity-2.9.1-3.2.46-201306041947.patch b/3.2.46/4420_grsecurity-2.9.1-3.2.46-201306041947.patch
index bf3ae8a..bf3ae8a 100644
--- a/3.2.45/4420_grsecurity-2.9.1-3.2.46-201306041947.patch
+++ b/3.2.46/4420_grsecurity-2.9.1-3.2.46-201306041947.patch
diff --git a/3.2.45/4425_grsec_remove_EI_PAX.patch b/3.2.46/4425_grsec_remove_EI_PAX.patch
index 7d06ac2..7d06ac2 100644
--- a/3.2.45/4425_grsec_remove_EI_PAX.patch
+++ b/3.2.46/4425_grsec_remove_EI_PAX.patch
diff --git a/3.2.45/4430_grsec-remove-localversion-grsec.patch b/3.2.46/4430_grsec-remove-localversion-grsec.patch
index 31cf878..31cf878 100644
--- a/3.2.45/4430_grsec-remove-localversion-grsec.patch
+++ b/3.2.46/4430_grsec-remove-localversion-grsec.patch
diff --git a/3.2.45/4435_grsec-mute-warnings.patch b/3.2.46/4435_grsec-mute-warnings.patch
index f099757..f099757 100644
--- a/3.2.45/4435_grsec-mute-warnings.patch
+++ b/3.2.46/4435_grsec-mute-warnings.patch
diff --git a/3.2.45/4440_grsec-remove-protected-paths.patch b/3.2.46/4440_grsec-remove-protected-paths.patch
index 637934a..637934a 100644
--- a/3.2.45/4440_grsec-remove-protected-paths.patch
+++ b/3.2.46/4440_grsec-remove-protected-paths.patch
diff --git a/3.2.45/4450_grsec-kconfig-default-gids.patch b/3.2.46/4450_grsec-kconfig-default-gids.patch
index 6f5b79b..6f5b79b 100644
--- a/3.2.45/4450_grsec-kconfig-default-gids.patch
+++ b/3.2.46/4450_grsec-kconfig-default-gids.patch
diff --git a/3.2.45/4465_selinux-avc_audit-log-curr_ip.patch b/3.2.46/4465_selinux-avc_audit-log-curr_ip.patch
index 47a5787..47a5787 100644
--- a/3.2.45/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/3.2.46/4465_selinux-avc_audit-log-curr_ip.patch
diff --git a/3.2.45/4470_disable-compat_vdso.patch b/3.2.46/4470_disable-compat_vdso.patch
index 99c691b..99c691b 100644
--- a/3.2.45/4470_disable-compat_vdso.patch
+++ b/3.2.46/4470_disable-compat_vdso.patch
diff --git a/3.2.45/4475_emutramp_default_on.patch b/3.2.46/4475_emutramp_default_on.patch
index 30f6978..30f6978 100644
--- a/3.2.45/4475_emutramp_default_on.patch
+++ b/3.2.46/4475_emutramp_default_on.patch